From 75e934842e1fc10f17acb96438d17761c1f5c6b8 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Tue, 30 Jul 2024 15:43:34 +1000 Subject: [PATCH 01/18] Swap finalized chains based on processed batches (#6203) * Shift from validated to processed --- beacon_node/network/src/sync/range_sync/chain.rs | 12 ++++-------- .../network/src/sync/range_sync/chain_collection.rs | 6 +++--- 2 files changed, 7 insertions(+), 11 deletions(-) diff --git a/beacon_node/network/src/sync/range_sync/chain.rs b/beacon_node/network/src/sync/range_sync/chain.rs index d63b2f95d80..1abd490b1fa 100644 --- a/beacon_node/network/src/sync/range_sync/chain.rs +++ b/beacon_node/network/src/sync/range_sync/chain.rs @@ -111,9 +111,6 @@ pub struct SyncingChain { /// The current processing batch, if any. current_processing_batch: Option, - /// Batches validated by this chain. - validated_batches: u64, - /// The chain's log. log: slog::Logger, } @@ -161,7 +158,6 @@ impl SyncingChain { attempted_optimistic_starts: HashSet::default(), state: ChainSyncingState::Stopped, current_processing_batch: None, - validated_batches: 0, log: log.new(o!("chain" => id)), } } @@ -182,8 +178,10 @@ impl SyncingChain { } /// Progress in epochs made by the chain - pub fn validated_epochs(&self) -> u64 { - self.validated_batches * EPOCHS_PER_BATCH + pub fn processed_epochs(&self) -> u64 { + self.processing_target + .saturating_sub(self.start_epoch) + .into() } /// Returns the total count of pending blocks in all the batches of this chain @@ -654,7 +652,6 @@ impl SyncingChain { let removed_batches = std::mem::replace(&mut self.batches, remaining_batches); for (id, batch) in removed_batches.into_iter() { - self.validated_batches = self.validated_batches.saturating_add(1); // only for batches awaiting validation can we be sure the last attempt is // right, and thus, that any different attempt is wrong match batch.state() { @@ -1166,7 +1163,6 @@ impl slog::KV for SyncingChain { )?; serializer.emit_usize("batches", 
self.batches.len())?; serializer.emit_usize("peers", self.peers.len())?; - serializer.emit_u64("validated_batches", self.validated_batches)?; serializer.emit_arguments("state", &format_args!("{:?}", self.state))?; slog::Result::Ok(()) } diff --git a/beacon_node/network/src/sync/range_sync/chain_collection.rs b/beacon_node/network/src/sync/range_sync/chain_collection.rs index 3621a6605af..1217fbf8fed 100644 --- a/beacon_node/network/src/sync/range_sync/chain_collection.rs +++ b/beacon_node/network/src/sync/range_sync/chain_collection.rs @@ -24,7 +24,7 @@ use types::{Epoch, Hash256, Slot}; const PARALLEL_HEAD_CHAINS: usize = 2; /// Minimum work we require a finalized chain to do before picking a chain with more peers. -const MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS: u64 = 10; +const MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS: u64 = 10; /// The state of the long range/batch sync. #[derive(Clone)] @@ -273,8 +273,8 @@ impl ChainCollection { // chains are different, check that they don't have the same number of peers if let Some(syncing_chain) = self.finalized_chains.get_mut(&syncing_id) { if max_peers > syncing_chain.available_peers() - && syncing_chain.validated_epochs() - > MIN_FINALIZED_CHAIN_VALIDATED_EPOCHS + && syncing_chain.processed_epochs() + > MIN_FINALIZED_CHAIN_PROCESSED_EPOCHS { syncing_chain.stop_syncing(); old_id = Some(Some(syncing_id)); From 9b3b73015925a84fbb004f63516d28e45da675ce Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 30 Jul 2024 23:48:28 +1000 Subject: [PATCH 02/18] Avoid acquiring another read lock while holding one to avoid potential deadlock (#6200) * Avoid acquiring another read lock to avoid potential deadlock. 
--- beacon_node/eth1/src/service.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/eth1/src/service.rs b/beacon_node/eth1/src/service.rs index 9cc1da13826..e5d60fac49c 100644 --- a/beacon_node/eth1/src/service.rs +++ b/beacon_node/eth1/src/service.rs @@ -1129,7 +1129,7 @@ impl Service { Ok(BlockCacheUpdateOutcome { blocks_imported, - head_block_number: self.inner.block_cache.read().highest_block_number(), + head_block_number: block_cache.highest_block_number(), }) } } From 0bb2386ff59e41de5dcbdf4bb3b6d5269636bc5d Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 1 Aug 2024 16:46:37 +1000 Subject: [PATCH 03/18] Work around UB in LMDB bindings (#6211) * Work around UB in LMDB bindings --- slasher/src/database/lmdb_impl.rs | 6 +++++- slasher/tests/random.rs | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/slasher/src/database/lmdb_impl.rs b/slasher/src/database/lmdb_impl.rs index 20d89a36fb0..74342968cfa 100644 --- a/slasher/src/database/lmdb_impl.rs +++ b/slasher/src/database/lmdb_impl.rs @@ -165,8 +165,12 @@ impl<'env> Cursor<'env> { } pub fn get_current(&mut self) -> Result, Value<'env>)>, Error> { + // FIXME: lmdb has an extremely broken API which can mutate the SHARED REFERENCE + // `value` after `get_current` is called. We need to convert it to a Vec here in order + // to avoid `value` changing after another cursor operation. I think this represents a bug + // in the LMDB bindings, as shared references should be immutable. if let Some((Some(key), value)) = self.cursor.get(None, None, MDB_GET_CURRENT).optional()? 
{ - Ok(Some((Cow::Borrowed(key), Cow::Borrowed(value)))) + Ok(Some((Cow::Borrowed(key), Cow::Owned(value.to_vec())))) } else { Ok(None) } diff --git a/slasher/tests/random.rs b/slasher/tests/random.rs index 0aaaa63f65c..0ba2986d44b 100644 --- a/slasher/tests/random.rs +++ b/slasher/tests/random.rs @@ -235,3 +235,8 @@ fn no_crash_blocks_example1() { }, ); } + +#[test] +fn no_crash_aug_24() { + random_test(13519442335106054152, TestConfig::default()) +} From 05bc99e67b10c65b6f56bd538f21739926c7818f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Sat, 3 Aug 2024 05:49:09 +0100 Subject: [PATCH 04/18] patch quick-protobuf (#6217) * patch quick-protobuf --- Cargo.lock | 3 +-- Cargo.toml | 5 ++++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 605cb4d2a58..872c6e3368b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6513,8 +6513,7 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quick-protobuf" version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6da84cc204722a989e01ba2f6e1e276e190f22263d0cb6ce8526fcdb0d2e1f" +source = "git+https://github.com/sigp/quick-protobuf.git?rev=681f413312404ab6e51f0b46f39b0075c6f4ebfd#681f413312404ab6e51f0b46f39b0075c6f4ebfd" dependencies = [ "byteorder", ] diff --git a/Cargo.toml b/Cargo.toml index b2957842d59..6d8d9d43bb8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -107,7 +107,7 @@ bytes = "1" clap = { version = "4.5.4", features = ["derive", "cargo", "wrap_help"] } # Turn off c-kzg's default features which include `blst/portable`. We can turn on blst's portable # feature ourselves when desired. 
-c-kzg = { version = "1", default-features = false } +c-kzg = { version = "1", default-features = false } compare_fields_derive = { path = "common/compare_fields_derive" } criterion = "0.5" delay_map = "0.3" @@ -240,6 +240,9 @@ validator_client = { path = "validator_client" } validator_dir = { path = "common/validator_dir" } warp_utils = { path = "common/warp_utils" } +[patch.crates-io] +quick-protobuf = { git = "https://github.com/sigp/quick-protobuf.git", rev = "681f413312404ab6e51f0b46f39b0075c6f4ebfd" } + [profile.maxperf] inherits = "release" lto = "fat" From 612946b27358ad838e4d66492b084bed76948bbe Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 5 Aug 2024 21:30:48 +1000 Subject: [PATCH 05/18] Downgrade re-org log to INFO (#6220) * Downgrade re-org log to INFO * Update book Co-authored-by: chonghe <44791194+chong-he@users.noreply.github.com> --- beacon_node/beacon_chain/src/canonical_head.rs | 4 ++-- book/src/late-block-re-orgs.md | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index a5d85d56032..40e2f60b0e9 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -48,7 +48,7 @@ use fork_choice::{ }; use itertools::process_results; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; -use slog::{crit, debug, error, warn, Logger}; +use slog::{crit, debug, error, info, warn, Logger}; use slot_clock::SlotClock; use state_processing::AllCaches; use std::sync::Arc; @@ -1212,7 +1212,7 @@ fn detect_reorg( &metrics::FORK_CHOICE_REORG_DISTANCE, reorg_distance.as_u64() as i64, ); - warn!( + info!( log, "Beacon chain re-org"; "previous_head" => ?old_block_root, diff --git a/book/src/late-block-re-orgs.md b/book/src/late-block-re-orgs.md index fc4530589d9..4a00f33aa44 100644 --- a/book/src/late-block-re-orgs.md +++ b/book/src/late-block-re-orgs.md @@ -50,10 +50,10 @@ A 
pair of messages at `INFO` level will be logged if a re-org opportunity is det > INFO Proposing block to re-org current head head_to_reorg: 0xf64f…2b49, slot: 1105320 -This should be followed shortly after by a `WARN` log indicating that a re-org occurred. This is +This should be followed shortly after by a `INFO` log indicating that a re-org occurred. This is expected and normal: -> WARN Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 +> INFO Beacon chain re-org reorg_distance: 1, new_slot: 1105320, new_head: 0x72791549e4ca792f91053bc7cf1e55c6fbe745f78ce7a16fc3acb6f09161becd, previous_slot: 1105319, previous_head: 0xf64f8e5ed617dc18c1e759dab5d008369767c3678416dac2fe1d389562842b49 In case a re-org is not viable (which should be most of the time), Lighthouse will just propose a block as normal and log the reason the re-org was not attempted at debug level: From f7f0bfc9f2b9eb25f3e111265f29943ad8c47641 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 6 Aug 2024 04:32:56 +0200 Subject: [PATCH 06/18] Skip recursive discovery query if no useful ENRs (#6207) * Skip recursive discovery query if no useful ENRs --- beacon_node/lighthouse_network/src/metrics.rs | 6 ++++++ .../lighthouse_network/src/peer_manager/mod.rs | 16 ++++++++++++++-- 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/beacon_node/lighthouse_network/src/metrics.rs b/beacon_node/lighthouse_network/src/metrics.rs index 9b11fe5a380..85da8dc2112 100644 --- a/beacon_node/lighthouse_network/src/metrics.rs +++ b/beacon_node/lighthouse_network/src/metrics.rs @@ -77,6 +77,12 @@ pub static DISCOVERY_SESSIONS: LazyLock> = LazyLock::new(|| { "The number of active discovery sessions with peers", ) }); +pub static DISCOVERY_NO_USEFUL_ENRS: LazyLock> = 
LazyLock::new(|| { + try_create_int_counter( + "discovery_no_useful_enrs_found", + "Total number of counts a query returned no useful ENRs to dial", + ) +}); pub static PEERS_PER_CLIENT: LazyLock> = LazyLock::new(|| { try_create_int_gauge_vec( diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 4d3da0c8e4b..6423da56fe2 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -321,6 +321,7 @@ impl PeerManager { /// This function decides whether or not to dial these peers. pub fn peers_discovered(&mut self, results: HashMap>) { let mut to_dial_peers = 0; + let results_count = results.len(); let connected_or_dialing = self.network_globals.connected_or_dialing_peers(); for (enr, min_ttl) in results { // There are two conditions in deciding whether to dial this peer. @@ -352,8 +353,19 @@ impl PeerManager { } } - // Queue another discovery if we need to - self.maintain_peer_count(to_dial_peers); + // The heartbeat will attempt new discovery queries every N seconds if the node needs more + // peers. As an optimization, this function can recursively trigger new discovery queries + // immediately if we don't fulfill our peers needs after completing a query. This + // recursiveness results in an infinite loop in networks where there are not enough peers to + // reach our target. To prevent the infinite loop, if a query returns no useful peers, we + // will cancel the recursiveness and wait for the heartbeat to trigger another query later. + if results_count > 0 && to_dial_peers == 0 { + debug!(self.log, "Skipping recursive discovery query after finding no useful results"; "results" => results_count); + metrics::inc_counter(&metrics::DISCOVERY_NO_USEFUL_ENRS); + } else { + // Queue another discovery if we need to + self.maintain_peer_count(to_dial_peers); + } } /// A STATUS message has been received from a peer. 
This resets the status timer. From f126a42b7ef9467552bc7efaf799bc79e9077108 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 6 Aug 2024 12:32:58 +1000 Subject: [PATCH 07/18] Remove double-locking in `eth/v1/node/syncing` (#6202) * Remove double-locking in `eth/v1/node/syncing` --- beacon_node/http_api/src/lib.rs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index ce62ed63f22..f98f4493964 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -2872,7 +2872,11 @@ pub fn serve( task_spawner .blocking_json_task(Priority::P0, move || { - let head_slot = chain.canonical_head.cached_head().head_slot(); + let (head, head_execution_status) = chain + .canonical_head + .head_and_execution_status() + .map_err(warp_utils::reject::beacon_chain_error)?; + let head_slot = head.head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { warp_utils::reject::custom_server_error( @@ -2883,9 +2887,7 @@ pub fn serve( // Taking advantage of saturating subtraction on slot. 
let sync_distance = current_slot - head_slot; - let is_optimistic = chain - .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + let is_optimistic = head_execution_status.is_optimistic_or_invalid(); let syncing_data = api_types::SyncingData { is_syncing: !network_globals.sync_state.read().is_synced(), From 42a1cd81fb97b6d761a72baf82979425a9416609 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 6 Aug 2024 16:11:00 +1000 Subject: [PATCH 08/18] Don't expect DAS config in HTTP spec response (#6221) * Don't expect DAS config in HTTP spec response --- consensus/types/src/chain_spec.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ca4df32d1e5..b347d786396 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -1365,10 +1365,13 @@ pub struct Config { #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_exit_churn_limit: u64, + #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] custody_requirement: u64, + #[serde(default = "default_data_column_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] data_column_sidecar_subnet_count: u64, + #[serde(default = "default_number_of_columns")] #[serde(with = "serde_utils::quoted_u64")] number_of_columns: u64, } @@ -1509,6 +1512,18 @@ const fn default_maximum_gossip_clock_disparity_millis() -> u64 { 500 } +const fn default_custody_requirement() -> u64 { + 1 +} + +const fn default_data_column_sidecar_subnet_count() -> u64 { + 32 +} + +const fn default_number_of_columns() -> u64 { + 128 +} + fn max_blocks_by_root_request_common(max_request_blocks: u64) -> usize { let max_request_blocks = max_request_blocks as usize; RuntimeVariableList::::from_vec( From 76c5660a65d5599e96bd67d4ba9780dfb53c6ecc Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 6 Aug 
2024 13:28:03 +0200 Subject: [PATCH 09/18] Add PeerDAS RPC import boilerplate --- beacon_node/beacon_chain/src/beacon_chain.rs | 100 +++++++++++++----- .../src/data_availability_checker.rs | 42 +++++--- .../overflow_lru_cache.rs | 29 ++--- .../src/data_column_verification.rs | 98 +++++++++++++---- beacon_node/beacon_processor/src/lib.rs | 17 ++- .../gossip_methods.rs | 2 +- .../src/network_beacon_processor/mod.rs | 19 ++++ .../network_beacon_processor/sync_methods.rs | 55 ++++++++++ 8 files changed, 282 insertions(+), 80 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8cd991cc103..ba9b6f0fbf4 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -23,7 +23,9 @@ use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, }; -use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; +use crate::data_column_verification::{ + DataColumnsSameBlock, GossipDataColumnError, GossipVerifiedDataColumn, +}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; @@ -2955,23 +2957,13 @@ impl BeaconChain { /// Cache the data columns in the processing cache, process it, then evict it from the cache if it was /// imported or errors. 
- pub async fn process_gossip_data_columns( + pub async fn process_gossip_data_column( self: &Arc, - data_columns: Vec>, + data_column: GossipVerifiedDataColumn, ) -> Result> { - let Ok((slot, block_root)) = data_columns - .iter() - .map(|c| (c.slot(), c.block_root())) - .unique() - .exactly_one() - else { - return Err(BlockError::InternalError( - "Columns should be from the same block".to_string(), - )); - }; - // If this block has already been imported to forkchoice it must have been available, so // we don't need to process its samples again. + let block_root = data_column.block_root(); if self .canonical_head .fork_choice_read_lock() @@ -2981,7 +2973,7 @@ impl BeaconChain { } let r = self - .check_gossip_data_columns_availability_and_import(slot, block_root, data_columns) + .check_gossip_data_columns_availability_and_import(data_column) .await; self.remove_notified_custody_columns(&block_root, r) } @@ -3020,6 +3012,31 @@ impl BeaconChain { self.remove_notified(&block_root, r) } + /// Cache the columns in the processing cache, process it, then evict it from the cache if it was + /// imported or errors. + pub async fn process_rpc_custody_columns( + self: &Arc, + custody_columns: DataColumnsSameBlock, + ) -> Result> { + // If this block has already been imported to forkchoice it must have been available, so + // we don't need to process its columns again. + let block_root = custody_columns.block_root(); + if self + .canonical_head + .fork_choice_read_lock() + .contains_block(&block_root) + { + return Err(BlockError::BlockIsAlreadyKnown(block_root)); + } + + // TODO(das): custody column SSE event + + let r = self + .check_rpc_custody_columns_availability_and_import(custody_columns) + .await; + self.remove_notified(&block_root, r) + } + /// Remove any block components from the *processing cache* if we no longer require them. If the /// block was imported full or erred, we no longer require them. 
fn remove_notified( @@ -3298,21 +3315,16 @@ impl BeaconChain { /// if so, otherwise caches the data column in the data availability checker. async fn check_gossip_data_columns_availability_and_import( self: &Arc, - slot: Slot, - block_root: Hash256, - data_columns: Vec>, + data_column: GossipVerifiedDataColumn, ) -> Result> { if let Some(slasher) = self.slasher.as_ref() { - for data_colum in &data_columns { - slasher.accept_block_header(data_colum.signed_block_header()); - } + slasher.accept_block_header(data_column.signed_block_header()); } - let availability = self.data_availability_checker.put_gossip_data_columns( - slot, - block_root, - data_columns, - )?; + let slot = data_column.slot(); + let availability = self + .data_availability_checker + .put_gossip_data_column(data_column)?; self.process_availability(slot, availability).await } @@ -3356,6 +3368,42 @@ impl BeaconChain { self.process_availability(slot, availability).await } + /// Checks if the provided columns can make any cached blocks available, and imports immediately + /// if so, otherwise caches the columns in the data availability checker. + async fn check_rpc_custody_columns_availability_and_import( + self: &Arc, + custody_columns: DataColumnsSameBlock, + ) -> Result> { + // Need to scope this to ensure the lock is dropped before calling `process_availability` + // Even an explicit drop is not enough to convince the borrow checker. 
+ { + let mut slashable_cache = self.observed_slashable.write(); + let header = custody_columns.signed_block_header(); + let block_root = custody_columns.block_root(); + if verify_header_signature::>(self, header).is_ok() { + slashable_cache + .observe_slashable( + header.message.slot, + header.message.proposer_index, + block_root, + ) + .map_err(|e| BlockError::BeaconChainError(e.into()))?; + if let Some(slasher) = self.slasher.as_ref() { + slasher.accept_block_header(header.clone()); + } + } + } + + // This slot value is purely informative for the consumers of + // `AvailabilityProcessingStatus::MissingComponents` to log an error with a slot. + let slot = custody_columns.slot(); + let availability = self + .data_availability_checker + .put_rpc_custody_columns(custody_columns)?; + + self.process_availability(slot, availability).await + } + /// Imports a fully available block. Otherwise, returns `AvailabilityProcessingStatus::MissingComponents` /// /// An error is returned if the block was unable to be imported. 
It may be partially imported diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index ce5995a5581..c6b34b29d0e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -16,14 +16,15 @@ use task_executor::TaskExecutor; use types::blob_sidecar::{BlobIdentifier, BlobSidecar, FixedBlobSidecarList}; use types::{ BlobSidecarList, ChainSpec, DataColumnSidecarList, Epoch, EthSpec, Hash256, SignedBeaconBlock, - Slot, }; mod error; mod overflow_lru_cache; mod state_lru_cache; -use crate::data_column_verification::{GossipVerifiedDataColumn, KzgVerifiedCustodyDataColumn}; +use crate::data_column_verification::{ + DataColumnsSameBlock, GossipVerifiedDataColumn, KzgVerifiedDataColumnsSameBlock, +}; pub use error::{Error as AvailabilityCheckError, ErrorCategory as AvailabilityCheckErrorCategory}; use types::non_zero_usize::new_non_zero_usize; @@ -176,6 +177,25 @@ impl DataAvailabilityChecker { .put_kzg_verified_blobs(block_root, epoch, verified_blobs) } + /// Put a list of custody columns received via RPC into the availability cache. This performs KZG + /// verification on the blobs in the list. + #[allow(clippy::type_complexity)] + pub fn put_rpc_custody_columns( + &self, + custody_columns: DataColumnsSameBlock, + ) -> Result, AvailabilityCheckError> { + let Some(kzg) = self.kzg.as_ref() else { + return Err(AvailabilityCheckError::KzgNotInitialized); + }; + + // TODO(das): report which column is invalid for proper peer scoring + // TODO(das): batch KZG verification here + let verified_custody_columns = custody_columns.verify(kzg)?; + + self.availability_cache + .put_kzg_verified_data_columns(verified_custody_columns) + } + /// Check if we've cached other blobs for this block. If it completes a set and we also /// have a block cached, return the `Availability` variant triggering block import. 
/// Otherwise cache the blob sidecar. @@ -192,20 +212,18 @@ impl DataAvailabilityChecker { ) } - pub fn put_gossip_data_columns( + pub fn put_gossip_data_column( &self, - slot: Slot, - block_root: Hash256, - gossip_data_columns: Vec>, + gossip_data_column: GossipVerifiedDataColumn, ) -> Result, AvailabilityCheckError> { - let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); - let custody_columns = gossip_data_columns - .into_iter() - .map(|c| KzgVerifiedCustodyDataColumn::from_asserted_custody(c.into_inner())) - .collect::>(); + let custody_column = gossip_data_column.into_inner(); + + // Will never error as there's exactly one column + let custody_columns_same_block = KzgVerifiedDataColumnsSameBlock::new(vec![custody_column]) + .expect("exactly one column is always in same block"); self.availability_cache - .put_kzg_verified_data_columns(block_root, epoch, custody_columns) + .put_kzg_verified_data_columns(custody_columns_same_block) } /// Check if we have all the blobs for a block. Returns `Availability` which has information diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 6c9964bdf86..d681159eaff 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -5,7 +5,7 @@ use crate::block_verification_types::{ AvailabilityPendingExecutedBlock, AvailableBlock, AvailableExecutedBlock, }; use crate::data_availability_checker::{Availability, AvailabilityCheckError}; -use crate::data_column_verification::KzgVerifiedCustodyDataColumn; +use crate::data_column_verification::{KzgVerifiedDataColumn, KzgVerifiedDataColumnsSameBlock}; use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; @@ -24,7 +24,7 @@ use types::{BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, SignedBeaconBlock}; pub struct PendingComponents { pub 
block_root: Hash256, pub verified_blobs: FixedVector>, E::MaxBlobsPerBlock>, - pub verified_data_columns: Vec>, + pub verified_data_columns: Vec>, pub executed_block: Option>, } @@ -50,7 +50,7 @@ impl PendingComponents { pub fn get_cached_data_column( &self, data_column_index: u64, - ) -> Option<&KzgVerifiedCustodyDataColumn> { + ) -> Option<&KzgVerifiedDataColumn> { self.verified_data_columns .iter() .find(|d| d.index() == data_column_index) @@ -157,13 +157,10 @@ impl PendingComponents { } /// Merges a given set of data columns into the cache. - fn merge_data_columns>>( - &mut self, - kzg_verified_data_columns: I, - ) { + fn merge_data_columns(&mut self, kzg_verified_data_columns: &[KzgVerifiedDataColumn]) { for data_column in kzg_verified_data_columns { if !self.data_column_exists(data_column.index()) { - self.verified_data_columns.push(data_column); + self.verified_data_columns.push(data_column.clone()); } } } @@ -261,7 +258,7 @@ impl PendingComponents { BlockImportRequirement::CustodyColumns(_) => { let verified_data_columns = verified_data_columns .into_iter() - .map(|d| d.into_inner()) + .map(|d| d.to_data_column()) .collect(); (None, Some(verified_data_columns)) } @@ -430,17 +427,13 @@ impl DataAvailabilityCheckerInner { } } - // TODO(das): rpc code paths to be implemented. - #[allow(dead_code)] - pub fn put_kzg_verified_data_columns< - I: IntoIterator>, - >( + pub fn put_kzg_verified_data_columns( &self, - block_root: Hash256, - epoch: Epoch, - kzg_verified_data_columns: I, + data_columns: KzgVerifiedDataColumnsSameBlock, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); + let block_root = data_columns.block_root(); + let epoch = data_columns.slot().epoch(T::EthSpec::slots_per_epoch()); // Grab existing entry or create a new entry. let mut pending_components = write_lock @@ -449,7 +442,7 @@ impl DataAvailabilityCheckerInner { .unwrap_or_else(|| PendingComponents::empty(block_root)); // Merge in the data columns. 
- pending_components.merge_data_columns(kzg_verified_data_columns); + pending_components.merge_data_columns(data_columns.columns()); let block_import_requirement = self.block_import_requirement(epoch)?; if pending_components.is_available(&block_import_requirement) { diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index fa31d6f2e8e..b68c92f95aa 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -12,10 +12,11 @@ use slog::debug; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; use std::sync::Arc; +use tree_hash::TreeHash; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ - BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSubnetId, EthSpec, Hash256, - RuntimeVariableList, SignedBeaconBlockHeader, Slot, + BeaconStateError, ChainSpec, DataColumnSidecar, DataColumnSidecarList, DataColumnSubnetId, + EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlockHeader, Slot, }; /// An error occurred while validating a gossip data column. 
@@ -175,7 +176,7 @@ impl GossipVerifiedDataColumn { pub fn id(&self) -> DataColumnIdentifier { DataColumnIdentifier { block_root: self.block_root, - index: self.data_column.data_column_index(), + index: self.data_column.index(), } } @@ -219,34 +220,91 @@ impl KzgVerifiedDataColumn { self.data.clone() } - pub fn data_column_index(&self) -> u64 { + pub fn index(&self) -> ColumnIndex { self.data.index } } -/// Data column that we must custody and has completed kzg verification -#[derive(Debug, Derivative, Clone, Encode, Decode)] -#[derivative(PartialEq, Eq)] -#[ssz(struct_behaviour = "transparent")] -pub struct KzgVerifiedCustodyDataColumn { - data: Arc>, +/// Collection of data columns for the same block root +pub struct DataColumnsSameBlock { + block_root: Hash256, + signed_block_header: SignedBeaconBlockHeader, + columns: DataColumnSidecarList, } -impl KzgVerifiedCustodyDataColumn { - /// Mark a column as custody column. Caller must ensure that our current custody requirements - /// include this column - pub fn from_asserted_custody(kzg_verified: KzgVerifiedDataColumn) -> Self { - Self { - data: kzg_verified.to_data_column(), +impl DataColumnsSameBlock { + pub fn new(columns: DataColumnSidecarList) -> Result { + let first_column = columns.first().ok_or("empty columns")?; + let signed_block_header = first_column.signed_block_header.clone(); + for column in columns.iter().skip(1) { + if column.signed_block_header != signed_block_header { + return Err("no same block"); + } } + Ok(Self { + block_root: signed_block_header.message.tree_hash_root(), + signed_block_header, + columns, + }) } - pub fn index(&self) -> ColumnIndex { - self.data.index + pub fn verify(self, kzg: &Kzg) -> Result, KzgError> { + Ok(KzgVerifiedDataColumnsSameBlock { + block_root: self.block_root, + signed_block_header: self.signed_block_header, + columns: self + .columns + .into_iter() + .map(|column| KzgVerifiedDataColumn::new(column, kzg)) + .collect::, _>>()?, + }) } - pub fn into_inner(self) -> 
Arc> { - self.data + pub fn block_root(&self) -> Hash256 { + self.block_root + } + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + pub fn signed_block_header(&self) -> &SignedBeaconBlockHeader { + &self.signed_block_header + } + pub fn columns(&self) -> &DataColumnSidecarList { + &self.columns + } +} + +/// Collection of KZG verified data columns for the same block root +pub struct KzgVerifiedDataColumnsSameBlock { + block_root: Hash256, + signed_block_header: SignedBeaconBlockHeader, + columns: Vec>, +} + +impl KzgVerifiedDataColumnsSameBlock { + pub fn new(columns: Vec>) -> Result { + let first_column = columns.first().ok_or("empty columns")?; + let signed_block_header = first_column.as_data_column().signed_block_header.clone(); + for column in columns.iter().skip(1) { + if column.as_data_column().signed_block_header != signed_block_header { + return Err("no same block"); + } + } + Ok(Self { + block_root: signed_block_header.message.tree_hash_root(), + signed_block_header, + columns, + }) + } + + pub fn block_root(&self) -> Hash256 { + self.block_root + } + pub fn slot(&self) -> Slot { + self.signed_block_header.message.slot + } + pub fn columns(&self) -> &[KzgVerifiedDataColumn] { + &self.columns } } diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 68c33e99baf..6ce3b64acfe 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -108,6 +108,7 @@ pub struct BeaconProcessorQueueLengths { unknown_light_client_update_queue: usize, rpc_block_queue: usize, rpc_blob_queue: usize, + rpc_custody_column_queue: usize, chain_segment_queue: usize, backfill_chain_segment: usize, gossip_block_queue: usize, @@ -163,6 +164,7 @@ impl BeaconProcessorQueueLengths { unknown_light_client_update_queue: 128, rpc_block_queue: 1024, rpc_blob_queue: 1024, + rpc_custody_column_queue: 1024, chain_segment_queue: 64, backfill_chain_segment: 64, gossip_block_queue: 1024, 
@@ -228,6 +230,7 @@ pub const GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE: &str = "light_client_optimistic pub const RPC_BLOCK: &str = "rpc_block"; pub const IGNORED_RPC_BLOCK: &str = "ignored_rpc_block"; pub const RPC_BLOBS: &str = "rpc_blob"; +pub const RPC_CUSTODY_COLUMN: &str = "rpc_custody_column"; pub const CHAIN_SEGMENT: &str = "chain_segment"; pub const CHAIN_SEGMENT_BACKFILL: &str = "chain_segment_backfill"; pub const STATUS_PROCESSING: &str = "status_processing"; @@ -606,6 +609,7 @@ pub enum Work { RpcBlobs { process_fn: AsyncFn, }, + RpcCustodyColumn(AsyncFn), IgnoredRpcBlock { process_fn: BlockingFn, }, @@ -653,6 +657,7 @@ impl Work { Work::GossipLightClientOptimisticUpdate(_) => GOSSIP_LIGHT_CLIENT_OPTIMISTIC_UPDATE, Work::RpcBlock { .. } => RPC_BLOCK, Work::RpcBlobs { .. } => RPC_BLOBS, + Work::RpcCustodyColumn { .. } => RPC_CUSTODY_COLUMN, Work::IgnoredRpcBlock { .. } => IGNORED_RPC_BLOCK, Work::ChainSegment { .. } => CHAIN_SEGMENT, Work::ChainSegmentBackfill(_) => CHAIN_SEGMENT_BACKFILL, @@ -815,6 +820,7 @@ impl BeaconProcessor { // Using a FIFO queue since blocks need to be imported sequentially. let mut rpc_block_queue = FifoQueue::new(queue_lengths.rpc_block_queue); let mut rpc_blob_queue = FifoQueue::new(queue_lengths.rpc_blob_queue); + let mut rpc_custody_column_queue = FifoQueue::new(queue_lengths.rpc_custody_column_queue); let mut chain_segment_queue = FifoQueue::new(queue_lengths.chain_segment_queue); let mut backfill_chain_segment = FifoQueue::new(queue_lengths.backfill_chain_segment); let mut gossip_block_queue = FifoQueue::new(queue_lengths.gossip_block_queue); @@ -970,6 +976,8 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = rpc_blob_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = rpc_custody_column_queue.pop() { + self.spawn_worker(item, idle_tx); // Check delayed blocks before gossip blocks, the gossip blocks might rely // on the delayed ones. 
} else if let Some(item) = delayed_block_queue.pop() { @@ -1262,6 +1270,9 @@ impl BeaconProcessor { rpc_block_queue.push(work, work_id, &self.log) } Work::RpcBlobs { .. } => rpc_blob_queue.push(work, work_id, &self.log), + Work::RpcCustodyColumn { .. } => { + rpc_custody_column_queue.push(work, work_id, &self.log) + } Work::ChainSegment { .. } => { chain_segment_queue.push(work, work_id, &self.log) } @@ -1497,9 +1508,9 @@ impl BeaconProcessor { beacon_block_root: _, process_fn, } => task_spawner.spawn_async(process_fn), - Work::RpcBlock { process_fn } | Work::RpcBlobs { process_fn } => { - task_spawner.spawn_async(process_fn) - } + Work::RpcBlock { process_fn } + | Work::RpcBlobs { process_fn } + | Work::RpcCustodyColumn(process_fn) => task_spawner.spawn_async(process_fn), Work::IgnoredRpcBlock { process_fn } => task_spawner.spawn_blocking(process_fn), Work::GossipBlock(work) | Work::GossipBlobSidecar(work) diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index 4c5c34bfd83..c8007f60dfa 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -988,7 +988,7 @@ impl NetworkBeaconProcessor { match self .chain - .process_gossip_data_columns(vec![verified_data_column]) + .process_gossip_data_column(verified_data_column) .await { Ok(availability) => { diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 9fb14fdcb8c..7ca68be82ba 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1,6 +1,7 @@ use crate::sync::manager::BlockProcessType; use crate::{service::NetworkMessage, sync::manager::SyncMessage}; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::data_column_verification::DataColumnsSameBlock; 
use beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend, BeaconChain}; use beacon_chain::{BeaconChainTypes, NotifyExecutionLayer}; use beacon_processor::{ @@ -476,6 +477,24 @@ impl NetworkBeaconProcessor { }) } + /// Create a new `Work` event for some custody columns. `process_rpc_custody_columns` reports + /// the result back to sync. + pub fn send_rpc_custody_columns( + self: &Arc, + custody_columns: DataColumnsSameBlock, + seen_timestamp: Duration, + process_type: BlockProcessType, + ) -> Result<(), Error> { + let s = self.clone(); + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::RpcCustodyColumn(Box::pin(async move { + s.process_rpc_custody_columns(custody_columns, seen_timestamp, process_type) + .await; + })), + }) + } + /// Create a new work event to import `blocks` as a beacon chain segment. pub fn send_chain_segment( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/sync_methods.rs b/beacon_node/network/src/network_beacon_processor/sync_methods.rs index 68bd6745144..59120af324e 100644 --- a/beacon_node/network/src/network_beacon_processor/sync_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/sync_methods.rs @@ -8,6 +8,7 @@ use crate::sync::{ use beacon_chain::block_verification_types::{AsBlock, RpcBlock}; use beacon_chain::data_availability_checker::AvailabilityCheckError; use beacon_chain::data_availability_checker::MaybeAvailableBlock; +use beacon_chain::data_column_verification::DataColumnsSameBlock; use beacon_chain::{ validator_monitor::get_slot_delay_ms, AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError, ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer, @@ -307,6 +308,60 @@ impl NetworkBeaconProcessor { }); } + pub async fn process_rpc_custody_columns( + self: Arc>, + custody_columns: DataColumnsSameBlock, + _seen_timestamp: Duration, + process_type: BlockProcessType, + ) { + let block_root = custody_columns.block_root(); + let 
result = self + .chain + .process_rpc_custody_columns(custody_columns) + .await; + + match &result { + Ok(availability) => match availability { + AvailabilityProcessingStatus::Imported(hash) => { + debug!( + self.log, + "Block components retrieved"; + "result" => "imported block and custody columns", + "block_hash" => %hash, + ); + self.chain.recompute_head_at_current_slot().await; + } + AvailabilityProcessingStatus::MissingComponents(_, _) => { + debug!( + self.log, + "Missing components over rpc"; + "block_hash" => %block_root, + ); + } + }, + Err(BlockError::BlockIsAlreadyKnown(_)) => { + debug!( + self.log, + "Custody columns have already been imported"; + "block_hash" => %block_root, + ); + } + Err(e) => { + warn!( + self.log, + "Error when importing rpc custody columns"; + "error" => ?e, + "block_hash" => %block_root, + ); + } + } + + self.send_sync_message(SyncMessage::BlockComponentProcessed { + process_type, + result: result.into(), + }); + } + /// Attempt to import the chain segment (`blocks`) to the beacon chain, informing the sync /// thread if more blocks are needed to process it. 
pub async fn process_chain_segment( From a68f34a014ea7fa4788497dd38b5e1a960957fe4 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 8 Aug 2024 09:31:35 +1000 Subject: [PATCH 10/18] Broadcast VC requests in parallel and fix subscription error (#6223) * Broadcast VC requests in parallel * Remove outdated comment * Try some things * Fix subscription error * Remove junk logging --- common/eth2/src/lib.rs | 9 +- validator_client/src/beacon_node_fallback.rs | 88 +++++++++++--------- validator_client/src/duties_service.rs | 53 ++++++++---- validator_client/src/lib.rs | 3 + 4 files changed, 94 insertions(+), 59 deletions(-) diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 6d000f576f9..3642f4bfe47 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -121,6 +121,7 @@ impl fmt::Display for Error { pub struct Timeouts { pub attestation: Duration, pub attester_duties: Duration, + pub attestation_subscriptions: Duration, pub liveness: Duration, pub proposal: Duration, pub proposer_duties: Duration, @@ -137,6 +138,7 @@ impl Timeouts { Timeouts { attestation: timeout, attester_duties: timeout, + attestation_subscriptions: timeout, liveness: timeout, proposal: timeout, proposer_duties: timeout, @@ -2515,7 +2517,12 @@ impl BeaconNodeHttpClient { .push("validator") .push("beacon_committee_subscriptions"); - self.post(path, &subscriptions).await?; + self.post_with_timeout( + path, + &subscriptions, + self.timeouts.attestation_subscriptions, + ) + .await?; Ok(()) } diff --git a/validator_client/src/beacon_node_fallback.rs b/validator_client/src/beacon_node_fallback.rs index 4467b807865..58d7f9d8eef 100644 --- a/validator_client/src/beacon_node_fallback.rs +++ b/validator_client/src/beacon_node_fallback.rs @@ -134,6 +134,12 @@ impl fmt::Display for Errors { } } +impl Errors { + pub fn num_errors(&self) -> usize { + self.0.len() + } +} + /// Reasons why a candidate might not be ready. 
#[derive(Debug, Clone, Copy)] pub enum CandidateError { @@ -599,46 +605,41 @@ impl BeaconNodeFallback { F: Fn(&'a BeaconNodeHttpClient) -> R, R: Future>, { - let mut results = vec![]; let mut to_retry = vec![]; let mut retry_unsynced = vec![]; // Run `func` using a `candidate`, returning the value or capturing errors. - // - // We use a macro instead of a closure here since it is not trivial to move `func` into a - // closure. - macro_rules! try_func { - ($candidate: ident) => {{ - inc_counter_vec(&ENDPOINT_REQUESTS, &[$candidate.beacon_node.as_ref()]); + let run_on_candidate = |candidate: &'a CandidateBeaconNode| async { + inc_counter_vec(&ENDPOINT_REQUESTS, &[candidate.beacon_node.as_ref()]); - // There exists a race condition where `func` may be called when the candidate is - // actually not ready. We deem this an acceptable inefficiency. - match func(&$candidate.beacon_node).await { - Ok(val) => results.push(Ok(val)), - Err(e) => { - // If we have an error on this function, make the client as not-ready. - // - // There exists a race condition where the candidate may have been marked - // as ready between the `func` call and now. We deem this an acceptable - // inefficiency. - if matches!(offline_on_failure, OfflineOnFailure::Yes) { - $candidate.set_offline().await; - } - results.push(Err(( - $candidate.beacon_node.to_string(), - Error::RequestFailed(e), - ))); - inc_counter_vec(&ENDPOINT_ERRORS, &[$candidate.beacon_node.as_ref()]); + // There exists a race condition where `func` may be called when the candidate is + // actually not ready. We deem this an acceptable inefficiency. + match func(&candidate.beacon_node).await { + Ok(val) => Ok(val), + Err(e) => { + // If we have an error on this function, mark the client as not-ready. + // + // There exists a race condition where the candidate may have been marked + // as ready between the `func` call and now. We deem this an acceptable + // inefficiency. 
+ if matches!(offline_on_failure, OfflineOnFailure::Yes) { + candidate.set_offline().await; } + inc_counter_vec(&ENDPOINT_ERRORS, &[candidate.beacon_node.as_ref()]); + Err((candidate.beacon_node.to_string(), Error::RequestFailed(e))) } - }}; - } + } + }; // First pass: try `func` on all synced and ready candidates. // // This ensures that we always choose a synced node if it is available. + let mut first_batch_futures = vec![]; for candidate in &self.candidates { match candidate.status(RequireSynced::Yes).await { + Ok(_) => { + first_batch_futures.push(run_on_candidate(candidate)); + } Err(CandidateError::NotSynced) if require_synced == false => { // This client is unsynced we will try it after trying all synced clients retry_unsynced.push(candidate); @@ -647,22 +648,24 @@ impl BeaconNodeFallback { // This client was not ready on the first pass, we might try it again later. to_retry.push(candidate); } - Ok(_) => try_func!(candidate), } } + let first_batch_results = futures::future::join_all(first_batch_futures).await; // Second pass: try `func` on ready unsynced candidates. This only runs if we permit // unsynced candidates. // // Due to async race-conditions, it is possible that we will send a request to a candidate // that has been set to an offline/unready status. This is acceptable. - if require_synced == false { - for candidate in retry_unsynced { - try_func!(candidate); - } - } + let second_batch_results = if require_synced == false { + futures::future::join_all(retry_unsynced.into_iter().map(run_on_candidate)).await + } else { + vec![] + }; // Third pass: try again, attempting to make non-ready clients become ready. + let mut third_batch_futures = vec![]; + let mut third_batch_results = vec![]; for candidate in to_retry { // If the candidate hasn't luckily transferred into the correct state in the meantime, // force an update of the state. 
@@ -676,16 +679,21 @@ impl BeaconNodeFallback { }; match new_status { - Ok(()) => try_func!(candidate), - Err(CandidateError::NotSynced) if require_synced == false => try_func!(candidate), - Err(e) => { - results.push(Err(( - candidate.beacon_node.to_string(), - Error::Unavailable(e), - ))); + Ok(()) => third_batch_futures.push(run_on_candidate(candidate)), + Err(CandidateError::NotSynced) if require_synced == false => { + third_batch_futures.push(run_on_candidate(candidate)) } + Err(e) => third_batch_results.push(Err(( + candidate.beacon_node.to_string(), + Error::Unavailable(e), + ))), } } + third_batch_results.extend(futures::future::join_all(third_batch_futures).await); + + let mut results = first_batch_results; + results.extend(second_batch_results); + results.extend(third_batch_results); let errors: Vec<_> = results.into_iter().filter_map(|res| res.err()).collect(); diff --git a/validator_client/src/duties_service.rs b/validator_client/src/duties_service.rs index 880f0eaa488..faa157a8592 100644 --- a/validator_client/src/duties_service.rs +++ b/validator_client/src/duties_service.rs @@ -86,7 +86,8 @@ const _: () = assert!({ /// This number is based upon `MIN_PEER_DISCOVERY_SLOT_LOOK_AHEAD` value in the /// `beacon_node::network::attestation_service` crate. It is not imported directly to avoid /// bringing in the entire crate. -const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > 2); +const MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD: u64 = 2; +const _: () = assert!(ATTESTATION_SUBSCRIPTION_OFFSETS[0] > MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD); // The info in the enum variants is displayed in logging, clippy thinks it's dead code. #[derive(Debug)] @@ -121,6 +122,8 @@ pub struct DutyAndProof { pub struct SubscriptionSlots { /// Pairs of `(slot, already_sent)` in slot-descending order. slots: Vec<(Slot, AtomicBool)>, + /// The slot of the duty itself. + duty_slot: Slot, } /// Create a selection proof for `duty`. 
@@ -172,18 +175,20 @@ impl SubscriptionSlots { .filter(|scheduled_slot| *scheduled_slot > current_slot) .map(|scheduled_slot| (scheduled_slot, AtomicBool::new(false))) .collect(); - Arc::new(Self { slots }) + Arc::new(Self { slots, duty_slot }) } /// Return `true` if we should send a subscription at `slot`. fn should_send_subscription_at(&self, slot: Slot) -> bool { // Iterate slots from smallest to largest looking for one that hasn't been completed yet. - self.slots - .iter() - .rev() - .any(|(scheduled_slot, already_sent)| { - slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) - }) + slot + MIN_ATTESTATION_SUBSCRIPTION_LOOKAHEAD <= self.duty_slot + && self + .slots + .iter() + .rev() + .any(|(scheduled_slot, already_sent)| { + slot >= *scheduled_slot && !already_sent.load(Ordering::Relaxed) + }) } /// Update our record of subscribed slots to account for successful subscription at `slot`. @@ -737,7 +742,7 @@ async fn poll_beacon_attesters( // If there are any subscriptions, push them out to beacon nodes if !subscriptions.is_empty() { let subscriptions_ref = &subscriptions; - if let Err(e) = duties_service + let subscription_result = duties_service .beacon_nodes .request( RequireSynced::No, @@ -753,15 +758,8 @@ async fn poll_beacon_attesters( .await }, ) - .await - { - error!( - log, - "Failed to subscribe validators"; - "error" => %e - ) - } else { - // Record that subscriptions were successfully sent. + .await; + if subscription_result.as_ref().is_ok() { debug!( log, "Broadcast attestation subscriptions"; @@ -770,6 +768,25 @@ async fn poll_beacon_attesters( for subscription_slots in subscription_slots_to_confirm { subscription_slots.record_successful_subscription_at(current_slot); } + } else if let Err(e) = subscription_result { + if e.num_errors() < duties_service.beacon_nodes.num_total() { + warn!( + log, + "Some subscriptions failed"; + "error" => %e, + ); + // If subscriptions were sent to at least one node, regard that as a success. 
+ // There is some redundancy built into the subscription schedule to handle failures. + for subscription_slots in subscription_slots_to_confirm { + subscription_slots.record_successful_subscription_at(current_slot); + } + } else { + error!( + log, + "All subscriptions failed"; + "error" => %e + ); + } } } diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 729ff62ee30..dff50582dfe 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -75,6 +75,7 @@ const WAITING_FOR_GENESIS_POLL_TIME: Duration = Duration::from_secs(12); /// This can help ensure that proper endpoint fallback occurs. const HTTP_ATTESTATION_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; +const HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT: u32 = 24; const HTTP_LIVENESS_TIMEOUT_QUOTIENT: u32 = 4; const HTTP_PROPOSAL_TIMEOUT_QUOTIENT: u32 = 2; const HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT: u32 = 4; @@ -323,6 +324,8 @@ impl ProductionValidatorClient { Timeouts { attestation: slot_duration / HTTP_ATTESTATION_TIMEOUT_QUOTIENT, attester_duties: slot_duration / HTTP_ATTESTER_DUTIES_TIMEOUT_QUOTIENT, + attestation_subscriptions: slot_duration + / HTTP_ATTESTATION_SUBSCRIPTIONS_TIMEOUT_QUOTIENT, liveness: slot_duration / HTTP_LIVENESS_TIMEOUT_QUOTIENT, proposal: slot_duration / HTTP_PROPOSAL_TIMEOUT_QUOTIENT, proposer_duties: slot_duration / HTTP_PROPOSER_DUTIES_TIMEOUT_QUOTIENT, From d6ba8c397557f5c977b70f0d822a9228e98ca214 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Thu, 8 Aug 2024 12:17:03 +1000 Subject: [PATCH 11/18] Release v5.3.0 (#6194) * Release v5.3.0 --- Cargo.lock | 8 ++++---- beacon_node/Cargo.toml | 2 +- boot_node/Cargo.toml | 2 +- common/lighthouse_version/src/lib.rs | 4 ++-- lcli/Cargo.toml | 2 +- lighthouse/Cargo.toml | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 872c6e3368b..0c7d10d6a58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -836,7 +836,7 @@ 
dependencies = [ [[package]] name = "beacon_node" -version = "5.2.1" +version = "5.3.0" dependencies = [ "beacon_chain", "clap", @@ -1043,7 +1043,7 @@ dependencies = [ [[package]] name = "boot_node" -version = "5.2.1" +version = "5.3.0" dependencies = [ "beacon_node", "clap", @@ -4376,7 +4376,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lcli" -version = "5.2.1" +version = "5.3.0" dependencies = [ "account_utils", "beacon_chain", @@ -4947,7 +4947,7 @@ dependencies = [ [[package]] name = "lighthouse" -version = "5.2.1" +version = "5.3.0" dependencies = [ "account_manager", "account_utils", diff --git a/beacon_node/Cargo.toml b/beacon_node/Cargo.toml index a5fd29c971f..146f1c1018e 100644 --- a/beacon_node/Cargo.toml +++ b/beacon_node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "beacon_node" -version = "5.2.1" +version = "5.3.0" authors = [ "Paul Hauner ", "Age Manning "] edition = { workspace = true } diff --git a/common/lighthouse_version/src/lib.rs b/common/lighthouse_version/src/lib.rs index d32d7994689..f988dd86b1f 100644 --- a/common/lighthouse_version/src/lib.rs +++ b/common/lighthouse_version/src/lib.rs @@ -17,8 +17,8 @@ pub const VERSION: &str = git_version!( // NOTE: using --match instead of --exclude for compatibility with old Git "--match=thiswillnevermatchlol" ], - prefix = "Lighthouse/v5.2.1-", - fallback = "Lighthouse/v5.2.1" + prefix = "Lighthouse/v5.3.0-", + fallback = "Lighthouse/v5.3.0" ); /// Returns the first eight characters of the latest commit hash for this build. 
diff --git a/lcli/Cargo.toml b/lcli/Cargo.toml index 3cddd8ee60b..30721f3d5ba 100644 --- a/lcli/Cargo.toml +++ b/lcli/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "lcli" description = "Lighthouse CLI (modeled after zcli)" -version = "5.2.1" +version = "5.3.0" authors = ["Paul Hauner "] edition = { workspace = true } diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index b9d3eaf8941..b381a3fb0e3 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "lighthouse" -version = "5.2.1" +version = "5.3.0" authors = ["Sigma Prime "] edition = { workspace = true } autotests = false From aad8727f52fc3a53df77b9f7b7e08a591e6fef14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Thu, 8 Aug 2024 06:55:47 +0100 Subject: [PATCH 12/18] add missing use std::sync::Lazylock to malloc_utils::glibc (#6234) * add missing use std::sync::Lazylock --- common/malloc_utils/src/glibc.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/common/malloc_utils/src/glibc.rs b/common/malloc_utils/src/glibc.rs index 9531102682d..41d8d28291d 100644 --- a/common/malloc_utils/src/glibc.rs +++ b/common/malloc_utils/src/glibc.rs @@ -9,6 +9,7 @@ use parking_lot::Mutex; use std::env; use std::os::raw::c_int; use std::result::Result; +use std::sync::LazyLock; /// The optimal mmap threshold for Lighthouse seems to be around 128KB. 
/// From 3913ea44c6dc412c6e723383dd398264a967faf3 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 9 Aug 2024 00:36:20 -0700 Subject: [PATCH 13/18] Persist light client updates (#5545) * persist light client updates * update beacon chain to serve light client updates * resolve todos * cache best update * extend cache parts * is better light client update * resolve merge conflict * initial api changes * add lc update db column * fmt * added tests * add sim * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * fix some weird issues with the simulator * tests * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * test changes * merge conflict * testing * started work on ef tests and some code clean up * update tests * linting * noop pre altair, were still failing on electra though * allow for zeroed light client header * Merge branch 'unstable' of https://github.com/sigp/lighthouse into persist-light-client-updates * merge unstable * remove unwraps * remove unwraps * Update light_client_update.rs * merge unstable * move functionality to helper methods * refactor is best update fn * refactor is best update fn * improve organization of light client server cache logic * fork diget calc, and only spawn as many blcoks as we need for the lc update test * fetch lc update from the cache if it exists * fmt * Fix beacon_chain tests * Add debug code to update ranking_order ef test * Fix compare code * merge conflicts * fix test * Merge branch 'persist-light-client-updates' of https://github.com/eserilev/lighthouse into persist-light-client-updates * Use blinded blocks for light client proofs * fix ef test * merge conflicts * fix lc update check * Lint * resolve merge conflict * Merge branch 'persist-light-client-updates' of https://github.com/eserilev/lighthouse into persist-light-client-updates * revert basic sim * small fix * revert sim * Review PR * resolve merge conflicts * 
Merge branch 'unstable' into persist-light-client-updates --- beacon_node/beacon_chain/src/beacon_chain.rs | 15 +- .../src/light_client_server_cache.rs | 223 +++++++++++---- beacon_node/beacon_chain/tests/store_tests.rs | 251 +++++++++++++++++ beacon_node/http_api/src/lib.rs | 29 +- beacon_node/http_api/src/light_client.rs | 143 ++++++++++ beacon_node/http_api/tests/tests.rs | 42 +++ beacon_node/store/src/leveldb_store.rs | 1 - beacon_node/store/src/lib.rs | 6 +- common/eth2/src/lib.rs | 25 ++ common/eth2/src/types.rs | 18 ++ .../types/src/light_client_finality_update.rs | 13 + consensus/types/src/light_client_header.rs | 42 +++ .../src/light_client_optimistic_update.rs | 13 + consensus/types/src/light_client_update.rs | 265 +++++++++++++----- testing/ef_tests/check_all_files_accessed.py | 4 +- testing/ef_tests/src/cases.rs | 2 + .../light_client_verify_is_better_update.rs | 110 ++++++++ testing/ef_tests/src/decode.rs | 12 +- testing/ef_tests/src/error.rs | 3 + testing/ef_tests/src/handler.rs | 26 ++ testing/ef_tests/tests/tests.rs | 5 + 21 files changed, 1124 insertions(+), 124 deletions(-) create mode 100644 beacon_node/http_api/src/light_client.rs create mode 100644 testing/ef_tests/src/cases/light_client_verify_is_better_update.rs diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 8cd991cc103..3bf75284779 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -1351,14 +1351,27 @@ impl BeaconChain { ) -> Result<(), Error> { self.light_client_server_cache.recompute_and_cache_updates( self.store.clone(), - &parent_root, slot, + &parent_root, &sync_aggregate, &self.log, &self.spec, ) } + pub fn get_light_client_updates( + &self, + sync_committee_period: u64, + count: u64, + ) -> Result>, Error> { + self.light_client_server_cache.get_light_client_updates( + &self.store, + sync_committee_period, + count, + &self.spec, + ) + } + /// Returns the current 
heads of the `BeaconChain`. For the canonical head, see `Self::head`. /// /// Returns `(block_root, block_slot)`. diff --git a/beacon_node/beacon_chain/src/light_client_server_cache.rs b/beacon_node/beacon_chain/src/light_client_server_cache.rs index 87513885f77..efc746675dc 100644 --- a/beacon_node/beacon_chain/src/light_client_server_cache.rs +++ b/beacon_node/beacon_chain/src/light_client_server_cache.rs @@ -1,14 +1,23 @@ use crate::errors::BeaconChainError; use crate::{metrics, BeaconChainTypes, BeaconStore}; use parking_lot::{Mutex, RwLock}; +use safe_arith::SafeArith; use slog::{debug, Logger}; +use ssz::Decode; +use ssz::Encode; use ssz_types::FixedVector; use std::num::NonZeroUsize; -use types::light_client_update::{FinalizedRootProofLen, FINALIZED_ROOT_INDEX}; +use std::sync::Arc; +use store::DBColumn; +use store::KeyValueStore; +use types::light_client_update::{ + FinalizedRootProofLen, NextSyncCommitteeProofLen, FINALIZED_ROOT_INDEX, + NEXT_SYNC_COMMITTEE_INDEX, +}; use types::non_zero_usize::new_non_zero_usize; use types::{ BeaconBlockRef, BeaconState, ChainSpec, EthSpec, ForkName, Hash256, LightClientFinalityUpdate, - LightClientOptimisticUpdate, Slot, SyncAggregate, + LightClientOptimisticUpdate, LightClientUpdate, Slot, SyncAggregate, SyncCommittee, }; /// A prev block cache miss requires to re-generate the state of the post-parent block. Items in the @@ -30,8 +39,10 @@ pub struct LightClientServerCache { latest_finality_update: RwLock>>, /// Tracks a single global latest optimistic update out of all imported blocks. 
latest_optimistic_update: RwLock>>, + /// Caches the most recent light client update + latest_light_client_update: RwLock>>, /// Caches state proofs by block root - prev_block_cache: Mutex>, + prev_block_cache: Mutex>>, } impl LightClientServerCache { @@ -39,13 +50,14 @@ impl LightClientServerCache { Self { latest_finality_update: None.into(), latest_optimistic_update: None.into(), + latest_light_client_update: None.into(), prev_block_cache: lru::LruCache::new(PREV_BLOCK_CACHE_SIZE).into(), } } /// Compute and cache state proofs for latter production of light-client messages. Does not /// trigger block replay. - pub fn cache_state_data( + pub(crate) fn cache_state_data( &self, spec: &ChainSpec, block: BeaconBlockRef, @@ -67,13 +79,13 @@ impl LightClientServerCache { Ok(()) } - /// Given a block with a SyncAggregte computes better or more recent light client updates. The + /// Given a block with a SyncAggregate computes better or more recent light client updates. The /// results are cached either on disk or memory to be served via p2p and rest API pub fn recompute_and_cache_updates( &self, store: BeaconStore, - block_parent_root: &Hash256, block_slot: Slot, + block_parent_root: &Hash256, sync_aggregate: &SyncAggregate, log: &Logger, chain_spec: &ChainSpec, @@ -100,11 +112,17 @@ impl LightClientServerCache { let attested_slot = attested_block.slot(); + let maybe_finalized_block = store.get_blinded_block(&cached_parts.finalized_block_root)?; + + let sync_period = block_slot + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + // Spec: Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice let is_latest_optimistic = match &self.latest_optimistic_update.read().clone() { Some(latest_optimistic_update) => { - is_latest_optimistic_update(latest_optimistic_update, attested_slot, signature_slot) + 
latest_optimistic_update.is_latest(attested_slot, signature_slot) } None => true, }; @@ -122,18 +140,17 @@ impl LightClientServerCache { // attested_header.beacon.slot (if multiple, highest signature_slot) as selected by fork choice let is_latest_finality = match &self.latest_finality_update.read().clone() { Some(latest_finality_update) => { - is_latest_finality_update(latest_finality_update, attested_slot, signature_slot) + latest_finality_update.is_latest(attested_slot, signature_slot) } None => true, }; + if is_latest_finality & !cached_parts.finalized_block_root.is_zero() { // Immediately after checkpoint sync the finalized block may not be available yet. - if let Some(finalized_block) = - store.get_blinded_block(&cached_parts.finalized_block_root)? - { + if let Some(finalized_block) = maybe_finalized_block.as_ref() { *self.latest_finality_update.write() = Some(LightClientFinalityUpdate::new( &attested_block, - &finalized_block, + finalized_block, cached_parts.finality_branch.clone(), sync_aggregate.clone(), signature_slot, @@ -148,9 +165,142 @@ impl LightClientServerCache { } } + let new_light_client_update = LightClientUpdate::new( + sync_aggregate, + block_slot, + cached_parts.next_sync_committee, + cached_parts.next_sync_committee_branch, + cached_parts.finality_branch, + &attested_block, + maybe_finalized_block.as_ref(), + chain_spec, + )?; + + // Spec: Full nodes SHOULD provide the best derivable LightClientUpdate (according to is_better_update) + // for each sync committee period + let prev_light_client_update = match &self.latest_light_client_update.read().clone() { + Some(prev_light_client_update) => Some(prev_light_client_update.clone()), + None => self.get_light_client_update(&store, sync_period, chain_spec)?, + }; + + let should_persist_light_client_update = + if let Some(prev_light_client_update) = prev_light_client_update { + let prev_sync_period = prev_light_client_update + .signature_slot() + .epoch(T::EthSpec::slots_per_epoch()) + 
.sync_committee_period(chain_spec)?; + + if sync_period != prev_sync_period { + true + } else { + prev_light_client_update + .is_better_light_client_update(&new_light_client_update, chain_spec)? + } + } else { + true + }; + + if should_persist_light_client_update { + self.store_light_client_update(&store, sync_period, &new_light_client_update)?; + } + Ok(()) } + fn store_light_client_update( + &self, + store: &BeaconStore, + sync_committee_period: u64, + light_client_update: &LightClientUpdate, + ) -> Result<(), BeaconChainError> { + let column = DBColumn::LightClientUpdate; + + store.hot_db.put_bytes( + column.into(), + &sync_committee_period.to_le_bytes(), + &light_client_update.as_ssz_bytes(), + )?; + + *self.latest_light_client_update.write() = Some(light_client_update.clone()); + + Ok(()) + } + + // Used to fetch the most recently persisted "best" light client update. + // Should not be used outside the light client server, as it also caches the fetched + // light client update. + fn get_light_client_update( + &self, + store: &BeaconStore, + sync_committee_period: u64, + chain_spec: &ChainSpec, + ) -> Result>, BeaconChainError> { + if let Some(latest_light_client_update) = self.latest_light_client_update.read().clone() { + let latest_lc_update_sync_committee_period = latest_light_client_update + .signature_slot() + .epoch(T::EthSpec::slots_per_epoch()) + .sync_committee_period(chain_spec)?; + if latest_lc_update_sync_committee_period == sync_committee_period { + return Ok(Some(latest_light_client_update)); + } + } + + let column = DBColumn::LightClientUpdate; + let res = store + .hot_db + .get_bytes(column.into(), &sync_committee_period.to_le_bytes())?; + + if let Some(light_client_update_bytes) = res { + let epoch = sync_committee_period + .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; + + let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, 
&fork_name) + .map_err(store::errors::Error::SszDecodeError)?; + + *self.latest_light_client_update.write() = Some(light_client_update.clone()); + return Ok(Some(light_client_update)); + } + + Ok(None) + } + + pub fn get_light_client_updates( + &self, + store: &BeaconStore, + start_period: u64, + count: u64, + chain_spec: &ChainSpec, + ) -> Result>, BeaconChainError> { + let column = DBColumn::LightClientUpdate; + let mut light_client_updates = vec![]; + for res in store + .hot_db + .iter_column_from::>(column, &start_period.to_le_bytes()) + { + let (sync_committee_bytes, light_client_update_bytes) = res?; + let sync_committee_period = u64::from_ssz_bytes(&sync_committee_bytes) + .map_err(store::errors::Error::SszDecodeError)?; + let epoch = sync_committee_period + .safe_mul(chain_spec.epochs_per_sync_committee_period.into())?; + + let fork_name = chain_spec.fork_name_at_epoch(epoch.into()); + + let light_client_update = + LightClientUpdate::from_ssz_bytes(&light_client_update_bytes, &fork_name) + .map_err(store::errors::Error::SszDecodeError)?; + + light_client_updates.push(light_client_update); + + if sync_committee_period >= start_period + count { + break; + } + } + Ok(light_client_updates) + } + /// Retrieves prev block cached data from cache. If not present re-computes by retrieving the /// parent state, and inserts an entry to the cache. /// @@ -161,7 +311,7 @@ impl LightClientServerCache { block_root: &Hash256, block_state_root: &Hash256, block_slot: Slot, - ) -> Result { + ) -> Result, BeaconChainError> { // Attempt to get the value from the cache first. 
if let Some(cached_parts) = self.prev_block_cache.lock().get(block_root) { return Ok(cached_parts.clone()); @@ -199,52 +349,25 @@ impl Default for LightClientServerCache { } type FinalityBranch = FixedVector; +type NextSyncCommitteeBranch = FixedVector; #[derive(Clone)] -struct LightClientCachedData { +struct LightClientCachedData { finality_branch: FinalityBranch, + next_sync_committee_branch: NextSyncCommitteeBranch, + next_sync_committee: Arc>, finalized_block_root: Hash256, } -impl LightClientCachedData { - fn from_state(state: &mut BeaconState) -> Result { +impl LightClientCachedData { + fn from_state(state: &mut BeaconState) -> Result { Ok(Self { finality_branch: state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?.into(), + next_sync_committee: state.next_sync_committee()?.clone(), + next_sync_committee_branch: state + .compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)? + .into(), finalized_block_root: state.finalized_checkpoint().root, }) } } - -// Implements spec prioritization rules: -// > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) -// -// ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update -fn is_latest_finality_update( - prev: &LightClientFinalityUpdate, - attested_slot: Slot, - signature_slot: Slot, -) -> bool { - let prev_slot = prev.get_attested_header_slot(); - if attested_slot > prev_slot { - true - } else { - attested_slot == prev_slot && signature_slot > *prev.signature_slot() - } -} - -// Implements spec prioritization rules: -// > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) -// -// ref: 
https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update -fn is_latest_optimistic_update( - prev: &LightClientOptimisticUpdate, - attested_slot: Slot, - signature_slot: Slot, -) -> bool { - let prev_slot = prev.get_slot(); - if attested_slot > prev_slot { - true - } else { - attested_slot == prev_slot && signature_slot > *prev.signature_slot() - } -} diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 01d7798b92c..7049bf14fde 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -5,6 +5,7 @@ use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::schema_change::migrate_schema; +use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::test_utils::{ mock_execution_layer_from_parts, test_spec, AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, KZG, @@ -103,6 +104,256 @@ fn get_harness_generic( harness } +#[tokio::test] +async fn light_client_updates_test() { + let spec = test_spec::(); + let Some(_) = spec.altair_fork_epoch else { + // No-op prior to Altair. 
+ return; + }; + + let num_final_blocks = E::slots_per_epoch() * 2; + let checkpoint_slot = Slot::new(E::slots_per_epoch() * 9); + let db_path = tempdir().unwrap(); + let log = test_logger(); + + let seconds_per_slot = spec.seconds_per_slot; + let store = get_store_generic( + &db_path, + StoreConfig { + slots_per_restore_point: 2 * E::slots_per_epoch(), + ..Default::default() + }, + test_spec::(), + ); + let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT); + let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); + let num_initial_slots = E::slots_per_epoch() * 10; + let slots: Vec = (1..num_initial_slots).map(Slot::new).collect(); + + let (genesis_state, genesis_state_root) = harness.get_current_state_and_root(); + harness + .add_attested_blocks_at_slots( + genesis_state.clone(), + genesis_state_root, + &slots, + &all_validators, + ) + .await; + + let wss_block_root = harness + .chain + .block_root_at_slot(checkpoint_slot, WhenSlotSkipped::Prev) + .unwrap() + .unwrap(); + let wss_state_root = harness + .chain + .state_root_at_slot(checkpoint_slot) + .unwrap() + .unwrap(); + let wss_block = harness + .chain + .store + .get_full_block(&wss_block_root) + .unwrap() + .unwrap(); + let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_state = store + .get_state(&wss_state_root, Some(checkpoint_slot)) + .unwrap() + .unwrap(); + + let kzg = spec.deneb_fork_epoch.map(|_| KZG.clone()); + + let mock = + mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + + harness.advance_slot(); + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + // Initialise a new beacon chain from the finalized checkpoint. + // The slot clock must be set to a time ahead of the checkpoint state. 
+ let slot_clock = TestingSlotClock::new( + Slot::new(0), + Duration::from_secs(harness.chain.genesis_time), + Duration::from_secs(seconds_per_slot), + ); + slot_clock.set_slot(harness.get_current_slot().as_u64()); + + let (shutdown_tx, _shutdown_rx) = futures::channel::mpsc::channel(1); + + let beacon_chain = BeaconChainBuilder::>::new(MinimalEthSpec) + .store(store.clone()) + .custom_spec(test_spec::()) + .task_executor(harness.chain.task_executor.clone()) + .logger(log.clone()) + .weak_subjectivity_state( + wss_state, + wss_block.clone(), + wss_blobs_opt.clone(), + genesis_state, + ) + .unwrap() + .store_migrator_config(MigratorConfig::default().blocking()) + .dummy_eth1_backend() + .expect("should build dummy backend") + .slot_clock(slot_clock) + .shutdown_sender(shutdown_tx) + .chain_config(ChainConfig::default()) + .event_handler(Some(ServerSentEventHandler::new_with_capacity( + log.clone(), + 1, + ))) + .execution_layer(Some(mock.el)) + .kzg(kzg) + .build() + .expect("should build"); + + let beacon_chain = Arc::new(beacon_chain); + + let current_state = harness.get_current_state(); + + if ForkName::Electra == current_state.fork_name_unchecked() { + // TODO(electra) fix beacon state `compute_merkle_proof` + return; + } + + let block_root = *current_state + .get_block_root(current_state.slot() - Slot::new(1)) + .unwrap(); + + let contributions = harness.make_sync_contributions( + ¤t_state, + block_root, + current_state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + // generate sync aggregates + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + beacon_chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + beacon_chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + // check that we can fetch the newly generated sync aggregate + let sync_aggregate = beacon_chain + 
.op_pool + .get_sync_aggregate(¤t_state) + .unwrap() + .unwrap(); + + // cache light client data + beacon_chain + .light_client_server_cache + .recompute_and_cache_updates( + store.clone(), + current_state.slot() - Slot::new(1), + &block_root, + &sync_aggregate, + &log, + &spec, + ) + .unwrap(); + + // calculate the sync period from the previous slot + let sync_period = (current_state.slot() - Slot::new(1)) + .epoch(E::slots_per_epoch()) + .sync_committee_period(&spec) + .unwrap(); + + // fetch a range of light client updates. right now there should only be one light client update + // in the db. + let lc_updates = beacon_chain + .get_light_client_updates(sync_period, 100) + .unwrap(); + + assert_eq!(lc_updates.len(), 1); + + // Advance to the next sync committee period + for _i in 0..(E::slots_per_epoch() * u64::from(spec.epochs_per_sync_committee_period)) { + harness.advance_slot(); + } + + harness + .extend_chain( + num_final_blocks as usize, + BlockStrategy::OnCanonicalHead, + AttestationStrategy::AllValidators, + ) + .await; + + let current_state = harness.get_current_state(); + + let block_root = *current_state + .get_block_root(current_state.slot() - Slot::new(1)) + .unwrap(); + + let contributions = harness.make_sync_contributions( + ¤t_state, + block_root, + current_state.slot() - Slot::new(1), + RelativeSyncCommittee::Current, + ); + + // generate new sync aggregates from this new state + for (_, contribution_and_proof) in contributions { + let contribution = contribution_and_proof + .expect("contribution exists for committee") + .message + .contribution; + beacon_chain + .op_pool + .insert_sync_contribution(contribution.clone()) + .unwrap(); + beacon_chain + .op_pool + .insert_sync_contribution(contribution) + .unwrap(); + } + + let sync_aggregate = beacon_chain + .op_pool + .get_sync_aggregate(¤t_state) + .unwrap() + .unwrap(); + + // cache new light client data + beacon_chain + .light_client_server_cache + .recompute_and_cache_updates( + store.clone(), 
+ current_state.slot() - Slot::new(1), + &block_root, + &sync_aggregate, + &log, + &spec, + ) + .unwrap(); + + // we should now have two light client updates in the db + let lc_updates = beacon_chain + .get_light_client_updates(sync_period, 100) + .unwrap(); + + assert_eq!(lc_updates.len(), 2); +} + /// Tests that `store.heal_freezer_block_roots_at_split` inserts block roots between last restore point /// slot and the split slot. #[tokio::test] diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index f98f4493964..aa47d5c4649 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -13,6 +13,7 @@ mod block_rewards; mod build_block_contents; mod builder_states; mod database; +mod light_client; mod metrics; mod produce_block; mod proposer_duties; @@ -30,6 +31,7 @@ mod validator_inclusion; mod validators; mod version; +use crate::light_client::get_light_client_updates; use crate::produce_block::{produce_blinded_block_v2, produce_block_v2, produce_block_v3}; use crate::version::fork_versioned_response; use beacon_chain::{ @@ -44,8 +46,8 @@ use bytes::Bytes; use directory::DEFAULT_ROOT_DIR; use eth2::types::{ self as api_types, BroadcastValidation, EndpointVersion, ForkChoice, ForkChoiceNode, - PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, ValidatorStatus, - ValidatorsRequestBody, + LightClientUpdatesQuery, PublishBlockRequest, ValidatorBalancesRequestBody, ValidatorId, + ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; @@ -2484,6 +2486,25 @@ pub fn serve( }, ); + // GET beacon/light_client/updates + let get_beacon_light_client_updates = beacon_light_client_path + .clone() + .and(task_spawner_filter.clone()) + .and(warp::path("updates")) + .and(warp::path::end()) + .and(warp::query::()) + .and(warp::header::optional::("accept")) + 
.then( + |chain: Arc>, + task_spawner: TaskSpawner, + query: LightClientUpdatesQuery, + accept_header: Option| { + task_spawner.blocking_response_task(Priority::P1, move || { + get_light_client_updates::(chain, query, accept_header) + }) + }, + ); + /* * beacon/rewards */ @@ -4640,6 +4661,10 @@ pub fn serve( enable(ctx.config.enable_light_client_server) .and(get_beacon_light_client_bootstrap), ) + .uor( + enable(ctx.config.enable_light_client_server) + .and(get_beacon_light_client_updates), + ) .uor(get_lighthouse_block_packing_efficiency) .uor(get_lighthouse_merge_readiness) .uor(get_events) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs new file mode 100644 index 00000000000..a6543114b85 --- /dev/null +++ b/beacon_node/http_api/src/light_client.rs @@ -0,0 +1,143 @@ +use beacon_chain::{BeaconChain, BeaconChainTypes}; +use eth2::types::{ + self as api_types, ChainSpec, ForkVersionedResponse, LightClientUpdate, + LightClientUpdateResponseChunk, LightClientUpdateSszResponse, LightClientUpdatesQuery, +}; +use ssz::Encode; +use std::sync::Arc; +use warp::{ + hyper::{Body, Response}, + reply::Reply, + Rejection, +}; + +use crate::version::{add_ssz_content_type_header, fork_versioned_response, V1}; + +const MAX_REQUEST_LIGHT_CLIENT_UPDATES: u64 = 128; + +pub fn get_light_client_updates( + chain: Arc>, + query: LightClientUpdatesQuery, + accept_header: Option, +) -> Result, Rejection> { + validate_light_client_updates_request(&chain, &query)?; + + let light_client_updates = chain + .get_light_client_updates(query.start_period, query.count) + .map_err(|_| { + warp_utils::reject::custom_not_found("No LightClientUpdates found".to_string()) + })?; + + match accept_header { + Some(api_types::Accept::Ssz) => { + let response_chunks = light_client_updates + .iter() + .map(|update| map_light_client_update_to_ssz_chunk::(&chain, update)) + .collect::>(); + + let ssz_response = LightClientUpdateSszResponse { + response_chunk_len: 
(light_client_updates.len() as u64).to_le_bytes().to_vec(), + response_chunk: response_chunks.as_ssz_bytes(), + } + .as_ssz_bytes(); + + Response::builder() + .status(200) + .body(ssz_response) + .map(|res: Response>| add_ssz_content_type_header(res)) + .map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "failed to create response: {}", + e + )) + }) + } + _ => { + let fork_versioned_response = light_client_updates + .iter() + .map(|update| map_light_client_update_to_json_response::(&chain, update.clone())) + .collect::>>, Rejection>>()?; + Ok(warp::reply::json(&fork_versioned_response).into_response()) + } + } +} + +pub fn validate_light_client_updates_request( + chain: &BeaconChain, + query: &LightClientUpdatesQuery, +) -> Result<(), Rejection> { + if query.count > MAX_REQUEST_LIGHT_CLIENT_UPDATES { + return Err(warp_utils::reject::custom_bad_request( + "Invalid count requested".to_string(), + )); + } + + let current_sync_period = chain + .epoch() + .map_err(|_| { + warp_utils::reject::custom_server_error("failed to get current epoch".to_string()) + })? + .sync_committee_period(&chain.spec) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "failed to get current sync committee period".to_string(), + ) + })?; + + if query.start_period > current_sync_period { + return Err(warp_utils::reject::custom_bad_request( + "Invalid sync committee period requested".to_string(), + )); + } + + let earliest_altair_sync_committee = chain + .spec + .altair_fork_epoch + .ok_or(warp_utils::reject::custom_server_error( + "failed to get altair fork epoch".to_string(), + ))? 
+ .sync_committee_period(&chain.spec) + .map_err(|_| { + warp_utils::reject::custom_server_error( + "failed to get earliest altair sync committee".to_string(), + ) + })?; + + if query.start_period < earliest_altair_sync_committee { + return Err(warp_utils::reject::custom_bad_request( + "Invalid sync committee period requested".to_string(), + )); + } + + Ok(()) +} + +fn map_light_client_update_to_ssz_chunk( + chain: &BeaconChain, + light_client_update: &LightClientUpdate, +) -> LightClientUpdateResponseChunk { + let fork_name = chain + .spec + .fork_name_at_slot::(*light_client_update.signature_slot()); + + let fork_digest = ChainSpec::compute_fork_digest( + chain.spec.fork_version_for_name(fork_name), + chain.genesis_validators_root, + ); + + LightClientUpdateResponseChunk { + context: fork_digest, + payload: light_client_update.as_ssz_bytes(), + } +} + +fn map_light_client_update_to_json_response( + chain: &BeaconChain, + light_client_update: LightClientUpdate, +) -> Result>, Rejection> { + let fork_name = chain + .spec + .fork_name_at_slot::(*light_client_update.signature_slot()); + + fork_versioned_response(V1, fork_name, light_client_update) +} diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index d51799b8661..9377e277c21 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1813,6 +1813,36 @@ impl ApiTester { self } + pub async fn test_get_beacon_light_client_updates(self) -> Self { + let current_epoch = self.chain.epoch().unwrap(); + let current_sync_committee_period = current_epoch + .sync_committee_period(&self.chain.spec) + .unwrap(); + + let result = match self + .client + .get_beacon_light_client_updates::(current_sync_committee_period as u64, 1) + .await + { + Ok(result) => result, + Err(e) => panic!("query failed incorrectly: {e:?}"), + }; + + let expected = self + .chain + .light_client_server_cache + .get_light_client_updates( + &self.chain.store, + 
current_sync_committee_period as u64, + 1, + &self.chain.spec, + ) + .unwrap(); + + assert_eq!(result.clone().unwrap().len(), expected.len()); + self + } + pub async fn test_get_beacon_light_client_bootstrap(self) -> Self { let block_id = BlockId(CoreBlockId::Finalized); let (block_root, _, _) = block_id.root(&self.chain).unwrap(); @@ -6171,6 +6201,18 @@ async fn node_get() { .await; } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn get_light_client_updates() { + let config = ApiTesterConfig { + spec: ForkName::Altair.make_genesis_spec(E::default_spec()), + ..<_>::default() + }; + ApiTester::new_from_config(config) + .await + .test_get_beacon_light_client_updates() + .await; +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn get_light_client_bootstrap() { let config = ApiTesterConfig { diff --git a/beacon_node/store/src/leveldb_store.rs b/beacon_node/store/src/leveldb_store.rs index 32ff942ddc7..28e04f56205 100644 --- a/beacon_node/store/src/leveldb_store.rs +++ b/beacon_node/store/src/leveldb_store.rs @@ -182,7 +182,6 @@ impl KeyValueStore for LevelDB { fn iter_column_from(&self, column: DBColumn, from: &[u8]) -> ColumnIter { let start_key = BytesKey::from_vec(get_key_for_col(column.into(), from)); - let iter = self.db.iter(self.read_options()); iter.seek(&start_key); diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 1f8cc8ca019..e8631cc5ec1 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -300,6 +300,9 @@ pub enum DBColumn { BeaconHistoricalSummaries, #[strum(serialize = "olc")] OverflowLRUCache, + /// For persisting eagerly computed light client data + #[strum(serialize = "lcu")] + LightClientUpdate, } /// A block from the database, which might have an execution payload or not. 
@@ -342,7 +345,8 @@ impl DBColumn { | Self::BeaconStateRoots | Self::BeaconHistoricalRoots | Self::BeaconHistoricalSummaries - | Self::BeaconRandaoMixes => 8, + | Self::BeaconRandaoMixes + | Self::LightClientUpdate => 8, Self::BeaconDataColumn => DATA_COLUMN_DB_KEY_SIZE, } } diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 6d000f576f9..48cdf7031a1 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -763,6 +763,31 @@ impl BeaconNodeHttpClient { self.get_opt(path).await } + /// `GET beacon/light_client/updates` + /// + /// Returns `Ok(None)` on a 404 error. + pub async fn get_beacon_light_client_updates( + &self, + start_period: u64, + count: u64, + ) -> Result>>>, Error> { + let mut path = self.eth_path(V1)?; + + path.path_segments_mut() + .map_err(|()| Error::InvalidUrl(self.server.clone()))? + .push("beacon") + .push("light_client") + .push("updates"); + + path.query_pairs_mut() + .append_pair("start_period", &start_period.to_string()); + + path.query_pairs_mut() + .append_pair("count", &count.to_string()); + + self.get_opt(path).await + } + /// `GET beacon/light_client/bootstrap` /// /// Returns `Ok(None)` on a 404 error. 
diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index fa5fb654b72..793d839ceea 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -784,6 +784,24 @@ pub struct ValidatorAggregateAttestationQuery { pub committee_index: Option, } +#[derive(Clone, Deserialize)] +pub struct LightClientUpdatesQuery { + pub start_period: u64, + pub count: u64, +} + +#[derive(Encode, Decode)] +pub struct LightClientUpdateSszResponse { + pub response_chunk_len: Vec, + pub response_chunk: Vec, +} + +#[derive(Encode, Decode)] +pub struct LightClientUpdateResponseChunk { + pub context: [u8; 4], + pub payload: Vec, +} + #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)] pub struct BeaconCommitteeSubscription { #[serde(with = "serde_utils::quoted_u64")] diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index e65b0572923..dc7561f5fcc 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -192,6 +192,19 @@ impl LightClientFinalityUpdate { // `2 *` because there are two headers in the update fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + + // Implements spec prioritization rules: + // > Full nodes SHOULD provide the LightClientFinalityUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) + // + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_finality_update + pub fn is_latest(&self, attested_slot: Slot, signature_slot: Slot) -> bool { + let prev_slot = self.get_attested_header_slot(); + if attested_slot > prev_slot { + true + } else { + attested_slot == prev_slot && signature_slot > *self.signature_slot() + } + } } impl ForkVersionDeserialize for LightClientFinalityUpdate { diff --git a/consensus/types/src/light_client_header.rs 
b/consensus/types/src/light_client_header.rs index 1feb748fae1..a1d5f85eac0 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -149,6 +149,15 @@ impl LightClientHeaderAltair { } } +impl Default for LightClientHeaderAltair { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderCapella { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -180,6 +189,17 @@ impl LightClientHeaderCapella { } } +impl Default for LightClientHeaderCapella { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderCapella::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderDeneb { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -211,6 +231,17 @@ impl LightClientHeaderDeneb { } } +impl Default for LightClientHeaderDeneb { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderDeneb::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl LightClientHeaderElectra { pub fn block_to_light_client_header( block: &SignedBlindedBeaconBlock, @@ -242,6 +273,17 @@ impl LightClientHeaderElectra { } } +impl Default for LightClientHeaderElectra { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderElectra::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl ForkVersionDeserialize for LightClientHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index f5b749be706..3cae31edf80 100644 --- 
a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -178,6 +178,19 @@ impl LightClientOptimisticUpdate { }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } + + // Implements spec prioritization rules: + // > Full nodes SHOULD provide the LightClientOptimisticUpdate with the highest attested_header.beacon.slot (if multiple, highest signature_slot) + // + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_optimistic_update + pub fn is_latest(&self, attested_slot: Slot, signature_slot: Slot) -> bool { + let prev_slot = self.get_slot(); + if attested_slot > prev_slot { + true + } else { + attested_slot == prev_slot && signature_slot > *self.signature_slot() + } + } } impl ForkVersionDeserialize for LightClientOptimisticUpdate { diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index 8a3eaff487f..3b48a68df31 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -1,12 +1,13 @@ use super::{EthSpec, FixedVector, Hash256, Slot, SyncAggregate, SyncCommittee}; use crate::light_client_header::LightClientHeaderElectra; use crate::{ - beacon_state, test_utils::TestRandom, BeaconBlock, BeaconBlockHeader, BeaconState, ChainSpec, - ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, SignedBlindedBeaconBlock, + beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, + LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, + SignedBlindedBeaconBlock, }; use derivative::Derivative; use safe_arith::ArithError; +use safe_arith::SafeArith; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use ssz::Decode; @@ -16,7 +17,6 @@ use ssz_types::typenum::{U4, U5, 
U6}; use std::sync::Arc; use superstruct::superstruct; use test_random_derive::TestRandom; -use tree_hash::TreeHash; use tree_hash_derive::TreeHash; pub const FINALIZED_ROOT_INDEX: usize = 105; @@ -35,6 +35,9 @@ pub const CURRENT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const NEXT_SYNC_COMMITTEE_PROOF_LEN: usize = 5; pub const EXECUTION_PAYLOAD_PROOF_LEN: usize = 4; +type FinalityBranch = FixedVector; +type NextSyncCommitteeBranch = FixedVector; + #[derive(Debug, PartialEq, Clone)] pub enum Error { SszTypesError(ssz_types::Error), @@ -117,7 +120,7 @@ pub struct LightClientUpdate { /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, /// Merkle proof for next sync committee - pub next_sync_committee_branch: FixedVector, + pub next_sync_committee_branch: NextSyncCommitteeBranch, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). #[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -128,7 +131,7 @@ pub struct LightClientUpdate { #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, /// Merkle proof attesting finalized header. 
- pub finality_branch: FixedVector, + pub finality_branch: FinalityBranch, /// current sync aggreggate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -152,45 +155,17 @@ impl ForkVersionDeserialize for LightClientUpdate { } impl LightClientUpdate { + #[allow(clippy::too_many_arguments)] pub fn new( - beacon_state: BeaconState, - block: BeaconBlock, - attested_state: &mut BeaconState, + sync_aggregate: &SyncAggregate, + block_slot: Slot, + next_sync_committee: Arc>, + next_sync_committee_branch: FixedVector, + finality_branch: FixedVector, attested_block: &SignedBlindedBeaconBlock, - finalized_block: &SignedBlindedBeaconBlock, + finalized_block: Option<&SignedBlindedBeaconBlock>, chain_spec: &ChainSpec, ) -> Result { - let sync_aggregate = block.body().sync_aggregate()?; - if sync_aggregate.num_set_bits() < chain_spec.min_sync_committee_participants as usize { - return Err(Error::NotEnoughSyncCommitteeParticipants); - } - - let signature_period = block.epoch().sync_committee_period(chain_spec)?; - // Compute and validate attested header. 
- let mut attested_header = attested_state.latest_block_header().clone(); - attested_header.state_root = attested_state.update_tree_hash_cache()?; - let attested_period = attested_header - .slot - .epoch(E::slots_per_epoch()) - .sync_committee_period(chain_spec)?; - if attested_period != signature_period { - return Err(Error::MismatchingPeriods); - } - // Build finalized header from finalized block - let finalized_header = BeaconBlockHeader { - slot: finalized_block.slot(), - proposer_index: finalized_block.message().proposer_index(), - parent_root: finalized_block.parent_root(), - state_root: finalized_block.state_root(), - body_root: finalized_block.message().body_root(), - }; - if finalized_header.tree_hash_root() != beacon_state.finalized_checkpoint().root { - return Err(Error::InvalidFinalizedBlock); - } - let next_sync_committee_branch = - attested_state.compute_merkle_proof(NEXT_SYNC_COMMITTEE_INDEX)?; - let finality_branch = attested_state.compute_merkle_proof(FINALIZED_ROOT_INDEX)?; - let light_client_update = match attested_block .fork_name(chain_spec) .map_err(|_| Error::InconsistentFork)? @@ -199,71 +174,91 @@ impl LightClientUpdate { ForkName::Altair | ForkName::Bellatrix => { let attested_header = LightClientHeaderAltair::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderAltair::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderAltair::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderAltair::default() + }; + Self::Altair(LightClientUpdateAltair { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Capella => { let attested_header = LightClientHeaderCapella::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderCapella::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderCapella::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderCapella::default() + }; + Self::Capella(LightClientUpdateCapella { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Deneb => { let attested_header = LightClientHeaderDeneb::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderDeneb::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderDeneb::block_to_light_client_header(finalized_block)? 
+ } else { + LightClientHeaderDeneb::default() + }; + Self::Deneb(LightClientUpdateDeneb { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } ForkName::Electra => { let attested_header = LightClientHeaderElectra::block_to_light_client_header(attested_block)?; - let finalized_header = - LightClientHeaderElectra::block_to_light_client_header(finalized_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderElectra::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderElectra::default() + }; + Self::Electra(LightClientUpdateElectra { attested_header, - next_sync_committee: attested_state.next_sync_committee()?.clone(), - next_sync_committee_branch: FixedVector::new(next_sync_committee_branch)?, + next_sync_committee, + next_sync_committee_branch, finalized_header, - finality_branch: FixedVector::new(finality_branch)?, + finality_branch, sync_aggregate: sync_aggregate.clone(), - signature_slot: block.slot(), + signature_slot: block_slot, }) } // To add a new fork, just append the new fork variant on the latest fork. 
Forks that - // have a distinct execution header will need a new LightClientUdpate variant only + // have a distinct execution header will need a new LightClientUpdate variant only // if you need to test or support lightclient usages }; Ok(light_client_update) } - pub fn from_ssz_bytes(bytes: &[u8], fork_name: ForkName) -> Result { + pub fn from_ssz_bytes(bytes: &[u8], fork_name: &ForkName) -> Result { let update = match fork_name { ForkName::Altair | ForkName::Bellatrix => { Self::Altair(LightClientUpdateAltair::from_ssz_bytes(bytes)?) @@ -280,6 +275,142 @@ impl LightClientUpdate { Ok(update) } + + pub fn attested_header_slot(&self) -> Slot { + match self { + LightClientUpdate::Altair(update) => update.attested_header.beacon.slot, + LightClientUpdate::Capella(update) => update.attested_header.beacon.slot, + LightClientUpdate::Deneb(update) => update.attested_header.beacon.slot, + LightClientUpdate::Electra(update) => update.attested_header.beacon.slot, + } + } + + pub fn finalized_header_slot(&self) -> Slot { + match self { + LightClientUpdate::Altair(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Capella(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Deneb(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Electra(update) => update.finalized_header.beacon.slot, + } + } + + fn attested_header_sync_committee_period( + &self, + chain_spec: &ChainSpec, + ) -> Result { + compute_sync_committee_period_at_slot::(self.attested_header_slot(), chain_spec) + .map_err(Error::ArithError) + } + + fn signature_slot_sync_committee_period(&self, chain_spec: &ChainSpec) -> Result { + compute_sync_committee_period_at_slot::(*self.signature_slot(), chain_spec) + .map_err(Error::ArithError) + } + + pub fn is_sync_committee_update(&self, chain_spec: &ChainSpec) -> Result { + Ok(!self.is_next_sync_committee_branch_empty() + && (self.attested_header_sync_committee_period(chain_spec)? 
+ == self.signature_slot_sync_committee_period(chain_spec)?)) + } + + pub fn has_sync_committee_finality(&self, chain_spec: &ChainSpec) -> Result { + Ok( + compute_sync_committee_period_at_slot::(self.finalized_header_slot(), chain_spec)? + == self.attested_header_sync_committee_period(chain_spec)?, + ) + } + + // Implements spec prioritization rules: + // Full nodes SHOULD provide the best derivable LightClientUpdate for each sync committee period + // ref: https://github.com/ethereum/consensus-specs/blob/113c58f9bf9c08867f6f5f633c4d98e0364d612a/specs/altair/light-client/full-node.md#create_light_client_update + pub fn is_better_light_client_update( + &self, + new: &Self, + chain_spec: &ChainSpec, + ) -> Result { + // Compare super majority (> 2/3) sync committee participation + let max_active_participants = new.sync_aggregate().sync_committee_bits.len(); + + let new_active_participants = new.sync_aggregate().sync_committee_bits.num_set_bits(); + let prev_active_participants = self.sync_aggregate().sync_committee_bits.num_set_bits(); + + let new_has_super_majority = + new_active_participants.safe_mul(3)? >= max_active_participants.safe_mul(2)?; + let prev_has_super_majority = + prev_active_participants.safe_mul(3)? 
>= max_active_participants.safe_mul(2)?; + + if new_has_super_majority != prev_has_super_majority { + return Ok(new_has_super_majority); + } + + if !new_has_super_majority && new_active_participants != prev_active_participants { + return Ok(new_active_participants > prev_active_participants); + } + + // Compare presence of relevant sync committee + let new_has_relevant_sync_committee = new.is_sync_committee_update(chain_spec)?; + let prev_has_relevant_sync_committee = self.is_sync_committee_update(chain_spec)?; + if new_has_relevant_sync_committee != prev_has_relevant_sync_committee { + return Ok(new_has_relevant_sync_committee); + } + + // Compare indication of any finality + let new_has_finality = !new.is_finality_branch_empty(); + let prev_has_finality = !self.is_finality_branch_empty(); + if new_has_finality != prev_has_finality { + return Ok(new_has_finality); + } + + // Compare sync committee finality + if new_has_finality { + let new_has_sync_committee_finality = new.has_sync_committee_finality(chain_spec)?; + let prev_has_sync_committee_finality = self.has_sync_committee_finality(chain_spec)?; + if new_has_sync_committee_finality != prev_has_sync_committee_finality { + return Ok(new_has_sync_committee_finality); + } + } + + // Tiebreaker 1: Sync committee participation beyond super majority + if new_active_participants != prev_active_participants { + return Ok(new_active_participants > prev_active_participants); + } + + let new_attested_header_slot = new.attested_header_slot(); + let prev_attested_header_slot = self.attested_header_slot(); + + // Tiebreaker 2: Prefer older data (fewer changes to best) + if new_attested_header_slot != prev_attested_header_slot { + return Ok(new_attested_header_slot < prev_attested_header_slot); + } + + return Ok(new.signature_slot() < self.signature_slot()); + } + + fn is_next_sync_committee_branch_empty(&self) -> bool { + for index in self.next_sync_committee_branch().iter() { + if *index != Hash256::default() { + return 
false; + } + } + true + } + + pub fn is_finality_branch_empty(&self) -> bool { + for index in self.finality_branch().iter() { + if *index != Hash256::default() { + return false; + } + } + true + } +} + +fn compute_sync_committee_period_at_slot( + slot: Slot, + chain_spec: &ChainSpec, +) -> Result { + slot.epoch(E::slots_per_epoch()) + .safe_div(chain_spec.epochs_per_sync_committee_period) } #[cfg(test)] diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index e1a308f7a40..f1ab5ad600d 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -26,7 +26,9 @@ "tests/.*/.*/ssz_static/Eth1Block/", "tests/.*/.*/ssz_static/PowBlock/", # light_client - "tests/.*/.*/light_client", + # "tests/.*/.*/light_client", + "tests/.*/.*/light_client/single_merkle_proof", + "tests/.*/.*/light_client/sync", # LightClientStore "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index f328fa64047..2d6f661f0e4 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -24,6 +24,7 @@ mod kzg_compute_kzg_proof; mod kzg_verify_blob_kzg_proof; mod kzg_verify_blob_kzg_proof_batch; mod kzg_verify_kzg_proof; +mod light_client_verify_is_better_update; mod merkle_proof_validity; mod operations; mod rewards; @@ -54,6 +55,7 @@ pub use kzg_compute_kzg_proof::*; pub use kzg_verify_blob_kzg_proof::*; pub use kzg_verify_blob_kzg_proof_batch::*; pub use kzg_verify_kzg_proof::*; +pub use light_client_verify_is_better_update::*; pub use merkle_proof_validity::*; pub use operations::*; pub use rewards::RewardsTest; diff --git a/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs new file mode 100644 index 00000000000..de281d906c1 --- /dev/null +++ 
b/testing/ef_tests/src/cases/light_client_verify_is_better_update.rs @@ -0,0 +1,110 @@ +use super::*; +use decode::ssz_decode_light_client_update; +use serde::Deserialize; +use types::{LightClientUpdate, Slot}; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct LightClientVerifyIsBetterUpdate { + light_client_updates: Vec>, +} + +#[derive(Debug, Clone, Default, Deserialize)] +pub struct Metadata { + updates_count: u64, +} + +impl LoadCase for LightClientVerifyIsBetterUpdate { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let mut light_client_updates = vec![]; + let metadata: Metadata = decode::yaml_decode_file(path.join("meta.yaml").as_path())?; + for index in 0..metadata.updates_count { + let light_client_update = ssz_decode_light_client_update( + &path.join(format!("updates_{}.ssz_snappy", index)), + &fork_name, + )?; + light_client_updates.push(light_client_update); + } + + Ok(Self { + light_client_updates, + }) + } +} + +impl Case for LightClientVerifyIsBetterUpdate { + // Light client updates in `self.light_client_updates` are ordered in descending precedence + // where the update at index = 0 is considered the best update. This test iterates through + // all light client updates in a nested loop to make all possible comparisons. If a light client update + // at index `i`` is considered 'better' than a light client update at index `j`` when `i > j`, this test fails. 
+ fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { + let spec = fork_name.make_genesis_spec(E::default_spec()); + for (i, ith_light_client_update) in self.light_client_updates.iter().enumerate() { + for (j, jth_light_client_update) in self.light_client_updates.iter().enumerate() { + eprintln!("{i} {j}"); + if i == j { + continue; + } + + let is_better_update = ith_light_client_update + .is_better_light_client_update(jth_light_client_update, &spec) + .unwrap(); + + let ith_summary = + LightClientUpdateSummary::from_update(ith_light_client_update, &spec); + let jth_summary = + LightClientUpdateSummary::from_update(jth_light_client_update, &spec); + + let (best_index, other_index, best_update, other_update, failed) = if i < j { + // i is better, so is_better_update must return false + (i, j, ith_summary, jth_summary, is_better_update) + } else { + // j is better, so is_better must return true + (j, i, jth_summary, ith_summary, !is_better_update) + }; + + if failed { + eprintln!("is_better_update: {is_better_update}"); + eprintln!("index {best_index} update {best_update:?}"); + eprintln!("index {other_index} update {other_update:?}"); + eprintln!( + "update at index {best_index} must be considered better than update at index {other_index}" + ); + return Err(Error::FailedComparison(format!( + "update at index {best_index} must be considered better than update at index {other_index}" + ))); + } + } + } + + Ok(()) + } +} + +#[derive(Debug)] +#[allow(dead_code)] +struct LightClientUpdateSummary { + participants: usize, + supermajority: bool, + relevant_sync_committee: bool, + has_finality: bool, + has_sync_committee_finality: bool, + header_slot: Slot, + signature_slot: Slot, +} + +impl LightClientUpdateSummary { + fn from_update(update: &LightClientUpdate, spec: &ChainSpec) -> Self { + let max_participants = update.sync_aggregate().sync_committee_bits.len(); + let participants = update.sync_aggregate().sync_committee_bits.num_set_bits(); + 
Self { + participants, + supermajority: participants * 3 > max_participants * 2, + relevant_sync_committee: update.is_sync_committee_update(spec).unwrap(), + has_finality: !update.is_finality_branch_empty(), + has_sync_committee_finality: update.has_sync_committee_finality(spec).unwrap(), + header_slot: update.attested_header_slot(), + signature_slot: *update.signature_slot(), + } + } +} diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index 51ab682f3dc..757b9bf3c43 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -5,7 +5,7 @@ use std::fs::{self}; use std::io::Write; use std::path::Path; use std::path::PathBuf; -use types::BeaconState; +use types::{BeaconState, LightClientUpdate}; /// See `log_file_access` for details. const ACCESSED_FILE_LOG_FILENAME: &str = ".accessed_file_log.txt"; @@ -95,3 +95,13 @@ pub fn ssz_decode_state( log_file_access(path); ssz_decode_file_with(path, |bytes| BeaconState::from_ssz_bytes(bytes, spec)) } + +pub fn ssz_decode_light_client_update( + path: &Path, + fork_name: &ForkName, +) -> Result, Error> { + log_file_access(path); + ssz_decode_file_with(path, |bytes| { + LightClientUpdate::from_ssz_bytes(bytes, fork_name) + }) +} diff --git a/testing/ef_tests/src/error.rs b/testing/ef_tests/src/error.rs index c5795777ada..389308377c7 100644 --- a/testing/ef_tests/src/error.rs +++ b/testing/ef_tests/src/error.rs @@ -14,6 +14,8 @@ pub enum Error { SkippedKnownFailure, /// The test failed due to some internal error preventing the test from running. InternalError(String), + /// The test failed while making some comparison. 
+ FailedComparison(String), } impl Error { @@ -26,6 +28,7 @@ impl Error { Error::SkippedBls => "SkippedBls", Error::SkippedKnownFailure => "SkippedKnownFailure", Error::InternalError(_) => "InternalError", + Error::FailedComparison(_) => "FailedComparison", } } diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 410a37e7682..52fc58f3d8c 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -837,6 +837,32 @@ impl Handler for KzgInclusionMerkleProofValidityHandler(PhantomData); + +impl Handler for LightClientUpdateHandler { + type Case = cases::LightClientVerifyIsBetterUpdate; + + fn config_name() -> &'static str { + E::name() + } + + fn runner_name() -> &'static str { + "light_client" + } + + fn handler_name(&self) -> String { + "update_ranking".into() + } + + fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { + // Enabled in Altair + // TODO(electra) re-enable once https://github.com/sigp/lighthouse/issues/6002 is resolved + fork_name != ForkName::Base && fork_name != ForkName::Electra + } +} + #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct OperationsHandler(PhantomData<(E, O)>); diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 10a57a6b45e..90143850443 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -900,6 +900,11 @@ fn merkle_proof_validity() { MerkleProofValidityHandler::::default().run(); } +#[test] +fn light_client_update() { + LightClientUpdateHandler::::default().run(); +} + #[test] #[cfg(feature = "fake_crypto")] fn kzg_inclusion_merkle_proof_validity() { From 781c5ecb1f206f701fb1fdc77fe4269002c35ff5 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Mon, 12 Aug 2024 12:31:18 +1000 Subject: [PATCH 14/18] Add lcli command for manual rescue sync (#5458) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Rescue CLI * Allow tweaking start block * More 
caching * Merge branch 'unstable' into rescue-cli # Conflicts: # lcli/src/main.rs * Add `--known–common-ancestor` flag to optimise for download speed. * Rename rescue command to `http-sync` * Add logging * Add optional `--block-cache-dir` cli arg and create directory if it doesn't already exist. * Lint fix. * Merge branch 'unstable' into rescue-cli --- lcli/src/http_sync.rs | 152 ++++++++++++++++++++++++++++++++++++++++++ lcli/src/main.rs | 74 ++++++++++++++++++++ 2 files changed, 226 insertions(+) create mode 100644 lcli/src/http_sync.rs diff --git a/lcli/src/http_sync.rs b/lcli/src/http_sync.rs new file mode 100644 index 00000000000..1ef40e63978 --- /dev/null +++ b/lcli/src/http_sync.rs @@ -0,0 +1,152 @@ +use clap::ArgMatches; +use clap_utils::{parse_optional, parse_required}; +use environment::Environment; +use eth2::{ + types::{BlockId, ChainSpec, ForkName, PublishBlockRequest, SignedBlockContents}, + BeaconNodeHttpClient, Error, SensitiveUrl, Timeouts, +}; +use eth2_network_config::Eth2NetworkConfig; +use ssz::Encode; +use std::fs; +use std::fs::File; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::time::Duration; +use types::EthSpec; + +const HTTP_TIMEOUT: Duration = Duration::from_secs(3600); +const DEFAULT_CACHE_DIR: &str = "./cache"; + +pub fn run( + env: Environment, + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let executor = env.core_context().executor; + executor + .handle() + .ok_or("shutdown in progress")? 
+ .block_on(async move { run_async::(network_config, matches).await }) +} + +pub async fn run_async( + network_config: Eth2NetworkConfig, + matches: &ArgMatches, +) -> Result<(), String> { + let spec = &network_config.chain_spec::()?; + let source_url: SensitiveUrl = parse_required(matches, "source-url")?; + let target_url: SensitiveUrl = parse_required(matches, "target-url")?; + let start_block: BlockId = parse_required(matches, "start-block")?; + let maybe_common_ancestor_block: Option = + parse_optional(matches, "known-common-ancestor")?; + let cache_dir_path: PathBuf = + parse_optional(matches, "block-cache-dir")?.unwrap_or(DEFAULT_CACHE_DIR.into()); + + let source = BeaconNodeHttpClient::new(source_url, Timeouts::set_all(HTTP_TIMEOUT)); + let target = BeaconNodeHttpClient::new(target_url, Timeouts::set_all(HTTP_TIMEOUT)); + + if !cache_dir_path.exists() { + fs::create_dir_all(&cache_dir_path) + .map_err(|e| format!("Unable to create block cache dir: {:?}", e))?; + } + + // 1. Download blocks back from head, looking for common ancestor. + let mut blocks = vec![]; + let mut next_block_id = start_block; + loop { + println!("downloading {next_block_id:?}"); + + let publish_block_req = + get_block_from_source::(&source, next_block_id, spec, &cache_dir_path).await; + let block = publish_block_req.signed_block(); + + next_block_id = BlockId::Root(block.parent_root()); + blocks.push((block.slot(), publish_block_req)); + + if let Some(ref common_ancestor_block) = maybe_common_ancestor_block { + if common_ancestor_block == &next_block_id { + println!("reached known common ancestor: {next_block_id:?}"); + break; + } + } + + let block_exists_in_target = target + .get_beacon_blocks_ssz::(next_block_id, spec) + .await + .unwrap() + .is_some(); + if block_exists_in_target { + println!("common ancestor found: {next_block_id:?}"); + break; + } + } + + // 2. Apply blocks to target. 
+ for (slot, block) in blocks.iter().rev() { + println!("posting block at slot {slot}"); + if let Err(e) = target.post_beacon_blocks(block).await { + if let Error::ServerMessage(ref e) = e { + if e.code == 202 { + println!("duplicate block detected while posting block at slot {slot}"); + continue; + } + } + return Err(format!("error posting {slot}: {e:?}")); + } else { + println!("success"); + } + } + + println!("SYNCED!!!!"); + + Ok(()) +} + +async fn get_block_from_source( + source: &BeaconNodeHttpClient, + block_id: BlockId, + spec: &ChainSpec, + cache_dir_path: &Path, +) -> PublishBlockRequest { + let mut cache_path = cache_dir_path.join(format!("block_{block_id}")); + + if cache_path.exists() { + let mut f = File::open(&cache_path).unwrap(); + let mut bytes = vec![]; + f.read_to_end(&mut bytes).unwrap(); + PublishBlockRequest::from_ssz_bytes(&bytes, ForkName::Deneb).unwrap() + } else { + let block_from_source = source + .get_beacon_blocks_ssz::(block_id, spec) + .await + .unwrap() + .unwrap(); + let blobs_from_source = source + .get_blobs::(block_id, None) + .await + .unwrap() + .unwrap() + .data; + + let (kzg_proofs, blobs): (Vec<_>, Vec<_>) = blobs_from_source + .iter() + .cloned() + .map(|sidecar| (sidecar.kzg_proof, sidecar.blob.clone())) + .unzip(); + + let block_root = block_from_source.canonical_root(); + let block_contents = SignedBlockContents { + signed_block: Arc::new(block_from_source), + kzg_proofs: kzg_proofs.into(), + blobs: blobs.into(), + }; + let publish_block_req = PublishBlockRequest::BlockContents(block_contents); + + cache_path = cache_dir_path.join(format!("block_{block_root:?}")); + let mut f = File::create(&cache_path).unwrap(); + f.write_all(&publish_block_req.as_ssz_bytes()).unwrap(); + + publish_block_req + } +} diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 85898b60ee4..380aeb6aceb 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -1,6 +1,7 @@ mod block_root; mod check_deposit_data; mod generate_bootnode_enr; +mod 
http_sync; mod indexed_attestations; mod mnemonic_validators; mod mock_el; @@ -552,6 +553,74 @@ fn main() { .display_order(0) ) ) + .subcommand( + Command::new("http-sync") + .about("Manual sync") + .arg( + Arg::new("start-block") + .long("start-block") + .value_name("BLOCK_ID") + .action(ArgAction::Set) + .help("Block ID of source's head") + .default_value("head") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("source-url") + .long("source-url") + .value_name("URL") + .action(ArgAction::Set) + .help("URL to a synced beacon-API provider") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("target-url") + .long("target-url") + .value_name("URL") + .action(ArgAction::Set) + .help("URL to an unsynced beacon-API provider") + .required(true) + .display_order(0) + ) + .arg( + Arg::new("testnet-dir") + .short('d') + .long("testnet-dir") + .value_name("PATH") + .action(ArgAction::Set) + .global(true) + .help("The testnet dir.") + .display_order(0) + ) + .arg( + Arg::new("network") + .long("network") + .value_name("NAME") + .action(ArgAction::Set) + .global(true) + .help("The network to use. 
Defaults to mainnet.") + .conflicts_with("testnet-dir") + .display_order(0) + ) + .arg( + Arg::new("known-common-ancestor") + .long("known-common-ancestor") + .value_name("BLOCK_ID") + .action(ArgAction::Set) + .help("Block ID of common ancestor, if known.") + .display_order(0) + ) + .arg( + Arg::new("block-cache-dir") + .long("block-cache-dir") + .value_name("PATH") + .action(ArgAction::Set) + .help("Directory to keep a cache of the downloaded SSZ blocks.") + .display_order(0) + ) + ) .get_matches(); let result = matches @@ -656,6 +725,11 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> } Some(("mock-el", matches)) => mock_el::run::(env, matches) .map_err(|e| format!("Failed to run mock-el command: {}", e)), + Some(("http-sync", matches)) => { + let network_config = get_network_config()?; + http_sync::run::(env, network_config, matches) + .map_err(|e| format!("Failed to run http-sync command: {}", e)) + } Some((other, _)) => Err(format!("Unknown subcommand {}. See --help.", other)), _ => Err("No subcommand provided. See --help.".to_string()), } From f2fdbe7fbe82428a9458deecc2d580533f955ed9 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 12 Aug 2024 12:31:21 +1000 Subject: [PATCH 15/18] Add plumbing for PeerDAS supernodes (#5050, #5409, #5570, #5966) (#6216) * Add plumbing for peerdas supernodes (#5050, #5409, #5570, #5966) - add cli option `--subscribe-to-all-data-columns` - add custody subnet count to ENR, only if PeerDAS is scheduled - subscribe to data column topics, only if PeerDAS is scheduled Co-authored-by: Jacob Kaufmann * Merge branch 'unstable' into das-supernode * Update CLI docs. * Merge branch 'unstable' into das-supernode * Fix fork epoch comparison with `FAR_FUTURE_EPOCH`. * Merge branch 'unstable' into das-supernode * Hide `--subscribe-all-data-column-subnets` flag and update help. 
* Fix docs only * Merge branch 'unstable' into das-supernode --- beacon_node/beacon_chain/src/builder.rs | 19 ++- .../src/data_availability_checker.rs | 11 +- beacon_node/client/src/builder.rs | 1 + beacon_node/lighthouse_network/src/config.rs | 6 +- .../lighthouse_network/src/discovery/enr.rs | 111 +++++++++++++++++- .../lighthouse_network/src/discovery/mod.rs | 2 +- .../lighthouse_network/src/service/mod.rs | 1 + beacon_node/network/src/service.rs | 56 ++++++++- beacon_node/src/cli.rs | 12 ++ beacon_node/src/config.rs | 4 + consensus/types/src/chain_spec.rs | 7 ++ lcli/src/generate_bootnode_enr.rs | 4 +- lcli/src/main.rs | 6 +- 13 files changed, 223 insertions(+), 17 deletions(-) diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index c86e35980ba..042d14a4fa4 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -104,6 +104,7 @@ pub struct BeaconChainBuilder { kzg: Option>, task_executor: Option, validator_monitor_config: Option, + import_all_data_columns: bool, } impl @@ -145,6 +146,7 @@ where kzg: None, task_executor: None, validator_monitor_config: None, + import_all_data_columns: false, } } @@ -615,6 +617,12 @@ where self } + /// Sets whether to require and import all data columns when importing block. + pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { + self.import_all_data_columns = import_all_data_columns; + self + } + /// Sets the `BeaconChain` event handler backend. /// /// For example, provide `ServerSentEventHandler` as a `handler`. 
@@ -965,8 +973,15 @@ where validator_monitor: RwLock::new(validator_monitor), genesis_backfill_slot, data_availability_checker: Arc::new( - DataAvailabilityChecker::new(slot_clock, self.kzg.clone(), store, &log, self.spec) - .map_err(|e| format!("Error initializing DataAvailabiltyChecker: {:?}", e))?, + DataAvailabilityChecker::new( + slot_clock, + self.kzg.clone(), + store, + self.import_all_data_columns, + &log, + self.spec, + ) + .map_err(|e| format!("Error initializing DataAvailabilityChecker: {:?}", e))?, ), kzg: self.kzg.clone(), }; diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index ce5995a5581..b4336a054e2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -95,11 +95,16 @@ impl DataAvailabilityChecker { slot_clock: T::SlotClock, kzg: Option>, store: BeaconStore, + import_all_data_columns: bool, log: &Logger, spec: ChainSpec, ) -> Result { - // TODO(das): support supernode or custom custody requirement - let custody_subnet_count = spec.custody_requirement as usize; + let custody_subnet_count = if import_all_data_columns { + spec.data_column_sidecar_subnet_count as usize + } else { + spec.custody_requirement as usize + }; + let custody_column_count = custody_subnet_count.saturating_mul(spec.data_columns_per_subnet()); @@ -112,8 +117,8 @@ impl DataAvailabilityChecker { Ok(Self { availability_cache: Arc::new(overflow_cache), slot_clock, - log: log.clone(), kzg, + log: log.clone(), spec, }) } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 393ce35f000..6695f3c4bc1 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -207,6 +207,7 @@ where .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) + .import_all_data_columns(config.network.subscribe_all_data_column_subnets) 
.validator_monitor_config(config.validator_monitor.clone()); let builder = if let Some(slasher) = self.slasher.clone() { diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 91c5b62d0b2..7c95977140e 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -42,7 +42,7 @@ pub struct Config { pub network_dir: PathBuf, /// IP addresses to listen on. - listen_addresses: ListenAddress, + pub(crate) listen_addresses: ListenAddress, /// The address to broadcast to peers about which address we are listening on. None indicates /// that no discovery address has been set in the CLI args. @@ -100,6 +100,9 @@ pub struct Config { /// Attempt to construct external port mappings with UPnP. pub upnp_enabled: bool, + /// Subscribe to all data column subnets for the duration of the runtime. + pub subscribe_all_data_column_subnets: bool, + /// Subscribe to all subnets for the duration of the runtime. pub subscribe_all_subnets: bool, @@ -338,6 +341,7 @@ impl Default for Config { upnp_enabled: true, network_load: 4, private: false, + subscribe_all_data_column_subnets: false, subscribe_all_subnets: false, import_all_attestations: false, shutdown_after_sync: false, diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 51e50808e1d..04ae9971502 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -14,7 +14,7 @@ use std::fs::File; use std::io::prelude::*; use std::path::Path; use std::str::FromStr; -use types::{EnrForkId, EthSpec}; +use types::{ChainSpec, EnrForkId, EthSpec}; use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; @@ -24,6 +24,8 @@ pub const ETH2_ENR_KEY: &str = "eth2"; pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; /// The ENR field specifying the sync committee subnet bitfield. 
pub const SYNC_COMMITTEE_BITFIELD_ENR_KEY: &str = "syncnets"; +/// The ENR field specifying the peerdas custody subnet count. +pub const PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY: &str = "csc"; /// Extension trait for ENR's within Eth2. pub trait Eth2Enr { @@ -35,6 +37,9 @@ pub trait Eth2Enr { &self, ) -> Result, &'static str>; + /// The peerdas custody subnet count associated with the ENR. + fn custody_subnet_count(&self, spec: &ChainSpec) -> u64; + fn eth2(&self) -> Result; } @@ -59,6 +64,16 @@ impl Eth2Enr for Enr { .map_err(|_| "Could not decode the ENR syncnets bitfield") } + /// if the custody value is non-existent in the ENR, then we assume the minimum custody value + /// defined in the spec. + fn custody_subnet_count(&self, spec: &ChainSpec) -> u64 { + self.get_decodable::(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) + .and_then(|r| r.ok()) + // If value supplied in ENR is invalid, fallback to `custody_requirement` + .filter(|csc| csc <= &spec.data_column_sidecar_subnet_count) + .unwrap_or(spec.custody_requirement) + } + fn eth2(&self) -> Result { let eth2_bytes = self.get(ETH2_ENR_KEY).ok_or("ENR has no eth2 field")?; @@ -126,12 +141,13 @@ pub fn build_or_load_enr( config: &NetworkConfig, enr_fork_id: &EnrForkId, log: &slog::Logger, + spec: &ChainSpec, ) -> Result { // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. 
let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec)?; use_or_load_enr(&enr_key, &mut local_enr, config, log)?; Ok(local_enr) @@ -142,6 +158,7 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, + spec: &ChainSpec, ) -> Result { let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; @@ -221,6 +238,16 @@ pub fn build_enr( builder.add_value(SYNC_COMMITTEE_BITFIELD_ENR_KEY, &bitfield.as_ssz_bytes()); + // only set `csc` if PeerDAS fork epoch has been scheduled + if spec.is_peer_das_scheduled() { + let custody_subnet_count = if config.subscribe_all_data_column_subnets { + spec.data_column_sidecar_subnet_count + } else { + spec.custody_requirement + }; + builder.add_value(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, &custody_subnet_count); + } + builder .build(enr_key) .map_err(|e| format!("Could not build Local ENR: {:?}", e)) @@ -244,10 +271,12 @@ fn compare_enr(local_enr: &Enr, disk_enr: &Enr) -> bool { // take preference over disk udp port if one is not specified && (local_enr.udp4().is_none() || local_enr.udp4() == disk_enr.udp4()) && (local_enr.udp6().is_none() || local_enr.udp6() == disk_enr.udp6()) - // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY key to match, - // otherwise we use a new ENR. This will likely only be true for non-validating nodes + // we need the ATTESTATION_BITFIELD_ENR_KEY and SYNC_COMMITTEE_BITFIELD_ENR_KEY and + // PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY key to match, otherwise we use a new ENR. This will + // likely only be true for non-validating nodes. 
&& local_enr.get(ATTESTATION_BITFIELD_ENR_KEY) == disk_enr.get(ATTESTATION_BITFIELD_ENR_KEY) && local_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) == disk_enr.get(SYNC_COMMITTEE_BITFIELD_ENR_KEY) + && local_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) == disk_enr.get(PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY) } /// Loads enr from the given directory @@ -280,3 +309,77 @@ pub fn save_enr_to_disk(dir: &Path, enr: &Enr, log: &slog::Logger) { } } } + +#[cfg(test)] +mod test { + use super::*; + use crate::config::Config as NetworkConfig; + use types::{Epoch, MainnetEthSpec}; + + type E = MainnetEthSpec; + + fn make_eip7594_spec() -> ChainSpec { + let mut spec = E::default_spec(); + spec.eip7594_fork_epoch = Some(Epoch::new(10)); + spec + } + + #[test] + fn custody_subnet_count_default() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: false, + ..NetworkConfig::default() + }; + let spec = make_eip7594_spec(); + + let enr = build_enr_with_config(config, &spec).0; + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.custody_requirement, + ); + } + + #[test] + fn custody_subnet_count_all() { + let config = NetworkConfig { + subscribe_all_data_column_subnets: true, + ..NetworkConfig::default() + }; + let spec = make_eip7594_spec(); + let enr = build_enr_with_config(config, &spec).0; + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.data_column_sidecar_subnet_count, + ); + } + + #[test] + fn custody_subnet_count_fallback_default() { + let config = NetworkConfig::default(); + let spec = make_eip7594_spec(); + let (mut enr, enr_key) = build_enr_with_config(config, &spec); + let invalid_subnet_count = 99u64; + + enr.insert( + PEERDAS_CUSTODY_SUBNET_COUNT_ENR_KEY, + &invalid_subnet_count, + &enr_key, + ) + .unwrap(); + + assert_eq!( + enr.custody_subnet_count::(&spec), + spec.custody_requirement, + ); + } + + fn build_enr_with_config(config: NetworkConfig, spec: &ChainSpec) -> (Enr, CombinedKey) { + let keypair = 
libp2p::identity::secp256k1::Keypair::generate(); + let enr_key = CombinedKey::from_secp256k1(&keypair); + let enr_fork_id = EnrForkId::default(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); + (enr, enr_key) + } +} diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 865d707495f..300c190cdaf 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1220,7 +1220,7 @@ mod tests { let mut config = NetworkConfig::default(); config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); - let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default()).unwrap(); + let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); let log = build_log(slog::Level::Debug, false); let globals = NetworkGlobals::new( enr, diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index c2a2a03fe87..fe649f4199a 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -162,6 +162,7 @@ impl Network { &config, &ctx.enr_fork_id, &log, + ctx.chain_spec, )?; // Construct the metadata let meta_data = utils::load_or_build_metadata(&config.network_dir, &log); diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index e522285a9e3..db5fc7636ec 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -16,6 +16,7 @@ use futures::prelude::*; use futures::StreamExt; use lighthouse_network::service::Network; use lighthouse_network::types::GossipKind; +use lighthouse_network::Eth2Enr; use lighthouse_network::{prometheus_client::registry::Registry, MessageAcceptance}; use lighthouse_network::{ rpc::{GoodbyeReason, RPCResponseErrorCode}, @@ -35,8 +36,8 
@@ use task_executor::ShutdownReason; use tokio::sync::mpsc; use tokio::time::Sleep; use types::{ - ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, - Unsigned, ValidatorSubscription, + ChainSpec, DataColumnSubnetId, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, + SyncSubnetId, Unsigned, ValidatorSubscription, }; mod tests; @@ -183,6 +184,8 @@ pub struct NetworkService { next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. next_unsubscribe: Pin>>, + /// Subscribe to all the data column subnets. + subscribe_all_data_column_subnets: bool, /// Subscribe to all the subnets once synced. subscribe_all_subnets: bool, /// Shutdown beacon node after sync is complete. @@ -349,6 +352,7 @@ impl NetworkService { next_fork_update, next_fork_subscriptions, next_unsubscribe, + subscribe_all_data_column_subnets: config.subscribe_all_data_column_subnets, subscribe_all_subnets: config.subscribe_all_subnets, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -733,6 +737,15 @@ impl NetworkService { } } + // TODO(das): This is added here for the purpose of testing, *without* having to + // activate Electra. 
This should happen as part of the Electra upgrade and we should + // move the subscription logic once it's ready to rebase PeerDAS on Electra, or if + // we decide to activate via the soft fork route: + // https://github.com/sigp/lighthouse/pull/5899 + if self.fork_context.spec.is_peer_das_scheduled() { + self.subscribe_to_peer_das_topics(&mut subscribed_topics); + } + // If we are to subscribe to all subnets we do it here if self.subscribe_all_subnets { for subnet_id in 0..<::EthSpec as EthSpec>::SubnetBitfieldLength::to_u64() { @@ -779,6 +792,45 @@ impl NetworkService { } } + fn subscribe_to_peer_das_topics(&mut self, subscribed_topics: &mut Vec) { + if self.subscribe_all_data_column_subnets { + for column_subnet in 0..self.fork_context.spec.data_column_sidecar_subnet_count { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = + Subnet::DataColumn(DataColumnSubnetId::new(column_subnet)).into(); + let topic = + GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } else { + for column_subnet in DataColumnSubnetId::compute_custody_subnets::( + self.network_globals.local_enr().node_id().raw().into(), + self.network_globals + .local_enr() + .custody_subnet_count::<::EthSpec>( + &self.fork_context.spec, + ), + &self.fork_context.spec, + ) { + for fork_digest in self.required_gossip_fork_digests() { + let gossip_kind = Subnet::DataColumn(column_subnet).into(); + let topic = + GossipTopic::new(gossip_kind, GossipEncoding::default(), fork_digest); + if self.libp2p.subscribe(topic.clone()) { + subscribed_topics.push(topic); + } else { + warn!(self.log, "Could not subscribe to topic"; "topic" => %topic); + } + } + } + } + } + /// Handle a message sent to the network service. 
async fn on_validator_subscription_msg(&mut self, msg: ValidatorSubscriptionMessage) { match msg { diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 2e1b1c093c8..3f991d4db28 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -54,6 +54,18 @@ pub fn cli_app() -> Command { /* * Network parameters. */ + .arg( + Arg::new("subscribe-all-data-column-subnets") + .long("subscribe-all-data-column-subnets") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .help("Subscribe to all data column subnets and participate in data custody for \ + all columns. This will also advertise the beacon node as being long-lived \ + subscribed to all data column subnets. \ + NOTE: this is an experimental flag and may change any time without notice!") + .display_order(0) + .hide(true) + ) .arg( Arg::new("subscribe-all-subnets") .long("subscribe-all-subnets") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index b4fa38da7d7..24bef73f7c2 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -1130,6 +1130,10 @@ pub fn set_network_config( config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; + if parse_flag(cli_args, "subscribe-all-data-column-subnets") { + config.subscribe_all_data_column_subnets = true; + } + if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ca4df32d1e5..ed929061ffb 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -426,6 +426,13 @@ impl ChainSpec { }) } + /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. + pub fn is_peer_das_scheduled(&self) -> bool { + self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { + eip7594_fork_epoch != self.far_future_epoch + }) + } + /// Returns a full `Fork` struct for a given epoch. 
pub fn fork_at_epoch(&self, epoch: Epoch) -> Fork { let current_fork_name = self.fork_name_at_epoch(epoch); diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 52960b929d8..26e17ba73ee 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -10,7 +10,7 @@ use std::{fs, net::Ipv4Addr}; use std::{fs::File, num::NonZeroU16}; use types::{ChainSpec, EnrForkId, Epoch, EthSpec, Hash256}; -pub fn run(matches: &ArgMatches) -> Result<(), String> { +pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), String> { let ip: Ipv4Addr = clap_utils::parse_required(matches, "ip")?; let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; @@ -37,7 +37,7 @@ pub fn run(matches: &ArgMatches) -> Result<(), String> { next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, &enr_fork_id) + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; diff --git a/lcli/src/main.rs b/lcli/src/main.rs index 380aeb6aceb..f055a23b362 100644 --- a/lcli/src/main.rs +++ b/lcli/src/main.rs @@ -707,8 +707,10 @@ fn run(env_builder: EnvironmentBuilder, matches: &ArgMatches) -> } Some(("check-deposit-data", matches)) => check_deposit_data::run(matches) .map_err(|e| format!("Failed to run check-deposit-data command: {}", e)), - Some(("generate-bootnode-enr", matches)) => generate_bootnode_enr::run::(matches) - .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)), + Some(("generate-bootnode-enr", matches)) => { + generate_bootnode_enr::run::(matches, &env.eth2_config.spec) + .map_err(|e| format!("Failed to run generate-bootnode-enr command: {}", e)) + } 
Some(("mnemonic-validators", matches)) => mnemonic_validators::run(matches) .map_err(|e| format!("Failed to run mnemonic-validators command: {}", e)), Some(("indexed-attestations", matches)) => indexed_attestations::run::(matches) From ff15c78ced26388ed882df494f922ce7bebba74c Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 13 Aug 2024 08:16:14 +0800 Subject: [PATCH 16/18] Implement data columns by network boilerplate (#6224) * Implement data columns by network boilerplate * Use correct quota values * Address PR review * Update currently_supported * Merge remote-tracking branch 'sigp/unstable' into peerdas-network-boilerplate * PR reviews * Fix data column rpc request not being sent due to incorrect limits set. (#6000) --- beacon_node/beacon_processor/src/lib.rs | 28 +++- .../src/peer_manager/mod.rs | 6 + .../src/rpc/codec/ssz_snappy.rs | 140 +++++++++++++++++- .../lighthouse_network/src/rpc/config.rs | 22 +++ .../lighthouse_network/src/rpc/methods.rs | 88 ++++++++++- beacon_node/lighthouse_network/src/rpc/mod.rs | 2 + .../lighthouse_network/src/rpc/outbound.rs | 22 +++ .../lighthouse_network/src/rpc/protocol.rs | 56 ++++++- .../src/rpc/rate_limiter.rs | 28 ++++ .../src/service/api_types.rs | 33 ++++- .../lighthouse_network/src/service/mod.rs | 30 ++++ .../src/network_beacon_processor/mod.rs | 38 ++++- .../network_beacon_processor/rpc_methods.rs | 33 ++++- beacon_node/network/src/router.rs | 99 +++++++++++-- beacon_node/network/src/sync/manager.rs | 33 +++++ 15 files changed, 624 insertions(+), 34 deletions(-) diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index f491dc7ffb0..68c33e99baf 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -119,6 +119,8 @@ pub struct BeaconProcessorQueueLengths { bbroots_queue: usize, blbroots_queue: usize, blbrange_queue: usize, + dcbroots_queue: usize, + dcbrange_queue: usize, 
gossip_bls_to_execution_change_queue: usize, lc_bootstrap_queue: usize, lc_optimistic_update_queue: usize, @@ -172,6 +174,9 @@ impl BeaconProcessorQueueLengths { bbroots_queue: 1024, blbroots_queue: 1024, blbrange_queue: 1024, + // TODO(das): pick proper values + dcbroots_queue: 1024, + dcbrange_queue: 1024, gossip_bls_to_execution_change_queue: 16384, lc_bootstrap_queue: 1024, lc_optimistic_update_queue: 512, @@ -230,6 +235,8 @@ pub const BLOCKS_BY_RANGE_REQUEST: &str = "blocks_by_range_request"; pub const BLOCKS_BY_ROOTS_REQUEST: &str = "blocks_by_roots_request"; pub const BLOBS_BY_RANGE_REQUEST: &str = "blobs_by_range_request"; pub const BLOBS_BY_ROOTS_REQUEST: &str = "blobs_by_roots_request"; +pub const DATA_COLUMNS_BY_ROOTS_REQUEST: &str = "data_columns_by_roots_request"; +pub const DATA_COLUMNS_BY_RANGE_REQUEST: &str = "data_columns_by_range_request"; pub const LIGHT_CLIENT_BOOTSTRAP_REQUEST: &str = "light_client_bootstrap"; pub const LIGHT_CLIENT_FINALITY_UPDATE_REQUEST: &str = "light_client_finality_update_request"; pub const LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST: &str = "light_client_optimistic_update_request"; @@ -609,6 +616,8 @@ pub enum Work { BlocksByRootsRequest(AsyncFn), BlobsByRangeRequest(BlockingFn), BlobsByRootsRequest(BlockingFn), + DataColumnsByRootsRequest(BlockingFn), + DataColumnsByRangeRequest(BlockingFn), GossipBlsToExecutionChange(BlockingFn), LightClientBootstrapRequest(BlockingFn), LightClientOptimisticUpdateRequest(BlockingFn), @@ -652,6 +661,8 @@ impl Work { Work::BlocksByRootsRequest(_) => BLOCKS_BY_ROOTS_REQUEST, Work::BlobsByRangeRequest(_) => BLOBS_BY_RANGE_REQUEST, Work::BlobsByRootsRequest(_) => BLOBS_BY_ROOTS_REQUEST, + Work::DataColumnsByRootsRequest(_) => DATA_COLUMNS_BY_ROOTS_REQUEST, + Work::DataColumnsByRangeRequest(_) => DATA_COLUMNS_BY_RANGE_REQUEST, Work::LightClientBootstrapRequest(_) => LIGHT_CLIENT_BOOTSTRAP_REQUEST, Work::LightClientOptimisticUpdateRequest(_) => LIGHT_CLIENT_OPTIMISTIC_UPDATE_REQUEST, 
Work::LightClientFinalityUpdateRequest(_) => LIGHT_CLIENT_FINALITY_UPDATE_REQUEST, @@ -816,6 +827,8 @@ impl BeaconProcessor { let mut bbroots_queue = FifoQueue::new(queue_lengths.bbroots_queue); let mut blbroots_queue = FifoQueue::new(queue_lengths.blbroots_queue); let mut blbrange_queue = FifoQueue::new(queue_lengths.blbrange_queue); + let mut dcbroots_queue = FifoQueue::new(queue_lengths.dcbroots_queue); + let mut dcbrange_queue = FifoQueue::new(queue_lengths.dcbrange_queue); let mut gossip_bls_to_execution_change_queue = FifoQueue::new(queue_lengths.gossip_bls_to_execution_change_queue); @@ -1118,6 +1131,10 @@ impl BeaconProcessor { self.spawn_worker(item, idle_tx); } else if let Some(item) = blbroots_queue.pop() { self.spawn_worker(item, idle_tx); + } else if let Some(item) = dcbroots_queue.pop() { + self.spawn_worker(item, idle_tx); + } else if let Some(item) = dcbrange_queue.pop() { + self.spawn_worker(item, idle_tx); // Check slashings after all other consensus messages so we prioritize // following head. // @@ -1282,6 +1299,12 @@ impl BeaconProcessor { Work::BlobsByRootsRequest { .. } => { blbroots_queue.push(work, work_id, &self.log) } + Work::DataColumnsByRootsRequest { .. } => { + dcbroots_queue.push(work, work_id, &self.log) + } + Work::DataColumnsByRangeRequest { .. } => { + dcbrange_queue.push(work, work_id, &self.log) + } Work::UnknownLightClientOptimisticUpdate { .. 
} => { unknown_light_client_update_queue.push(work, work_id, &self.log) } @@ -1483,7 +1506,10 @@ impl BeaconProcessor { | Work::GossipDataColumnSidecar(work) => task_spawner.spawn_async(async move { work.await; }), - Work::BlobsByRangeRequest(process_fn) | Work::BlobsByRootsRequest(process_fn) => { + Work::BlobsByRangeRequest(process_fn) + | Work::BlobsByRootsRequest(process_fn) + | Work::DataColumnsByRootsRequest(process_fn) + | Work::DataColumnsByRangeRequest(process_fn) => { task_spawner.spawn_blocking(process_fn) } Work::BlocksByRangeRequest(work) | Work::BlocksByRootsRequest(work) => { diff --git a/beacon_node/lighthouse_network/src/peer_manager/mod.rs b/beacon_node/lighthouse_network/src/peer_manager/mod.rs index 6423da56fe2..4c9551507e7 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/mod.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/mod.rs @@ -569,6 +569,8 @@ impl PeerManager { Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, Protocol::BlobsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::Goodbye => PeerAction::LowToleranceError, Protocol::MetaData => PeerAction::LowToleranceError, Protocol::Status => PeerAction::LowToleranceError, @@ -587,6 +589,8 @@ impl PeerManager { Protocol::BlocksByRoot => return, Protocol::BlobsByRange => return, Protocol::BlobsByRoot => return, + Protocol::DataColumnsByRoot => return, + Protocol::DataColumnsByRange => return, Protocol::Goodbye => return, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, @@ -607,6 +611,8 @@ impl PeerManager { Protocol::BlocksByRoot => PeerAction::MidToleranceError, Protocol::BlobsByRange => PeerAction::MidToleranceError, Protocol::BlobsByRoot => PeerAction::MidToleranceError, + Protocol::DataColumnsByRoot => PeerAction::MidToleranceError, + 
Protocol::DataColumnsByRange => PeerAction::MidToleranceError, Protocol::LightClientBootstrap => return, Protocol::LightClientOptimisticUpdate => return, Protocol::LightClientFinalityUpdate => return, diff --git a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs index 482d1d96b4a..f5d8b58dcee 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec/ssz_snappy.rs @@ -16,10 +16,11 @@ use std::marker::PhantomData; use std::sync::Arc; use tokio_util::codec::{Decoder, Encoder}; use types::{ - BlobSidecar, ChainSpec, EthSpec, ForkContext, ForkName, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, - SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + BlobSidecar, ChainSpec, DataColumnSidecar, EthSpec, ForkContext, ForkName, Hash256, + LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, + RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, + SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, + SignedBeaconBlockElectra, }; use unsigned_varint::codec::Uvi; @@ -70,6 +71,8 @@ impl Encoder> for SSZSnappyInboundCodec { RPCResponse::BlocksByRoot(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRange(res) => res.as_ssz_bytes(), RPCResponse::BlobsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::DataColumnsByRoot(res) => res.as_ssz_bytes(), + RPCResponse::DataColumnsByRange(res) => res.as_ssz_bytes(), RPCResponse::LightClientBootstrap(res) => res.as_ssz_bytes(), RPCResponse::LightClientOptimisticUpdate(res) => res.as_ssz_bytes(), RPCResponse::LightClientFinalityUpdate(res) => res.as_ssz_bytes(), @@ -224,6 +227,8 @@ impl Encoder> for SSZSnappyOutboundCodec { }, 
OutboundRequest::BlobsByRange(req) => req.as_ssz_bytes(), OutboundRequest::BlobsByRoot(req) => req.blob_ids.as_ssz_bytes(), + OutboundRequest::DataColumnsByRange(req) => req.as_ssz_bytes(), + OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.as_ssz_bytes(), OutboundRequest::Ping(req) => req.as_ssz_bytes(), OutboundRequest::MetaData(_) => return Ok(()), // no metadata to encode }; @@ -414,7 +419,12 @@ fn context_bytes( } }; } - RPCResponse::BlobsByRange(_) | RPCResponse::BlobsByRoot(_) => { + RPCResponse::BlobsByRange(_) + | RPCResponse::BlobsByRoot(_) + | RPCResponse::DataColumnsByRoot(_) + | RPCResponse::DataColumnsByRange(_) => { + // TODO(das): If DataColumnSidecar is defined as an Electra type, update the + // context bytes to point to ForkName::Electra return fork_context.to_context_bytes(ForkName::Deneb); } RPCResponse::LightClientBootstrap(lc_bootstrap) => { @@ -512,6 +522,17 @@ fn handle_rpc_request( )?, }))) } + SupportedProtocol::DataColumnsByRootV1 => Ok(Some(InboundRequest::DataColumnsByRoot( + DataColumnsByRootRequest { + data_column_ids: RuntimeVariableList::from_ssz_bytes( + decoded_buffer, + spec.max_request_data_column_sidecars as usize, + )?, + }, + ))), + SupportedProtocol::DataColumnsByRangeV1 => Ok(Some(InboundRequest::DataColumnsByRange( + DataColumnsByRangeRequest::from_ssz_bytes(decoded_buffer)?, + ))), SupportedProtocol::PingV1 => Ok(Some(InboundRequest::Ping(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -604,6 +625,51 @@ fn handle_rpc_response( ), )), }, + SupportedProtocol::DataColumnsByRootV1 => match fork_name { + Some(fork_name) => { + // TODO(das): PeerDAS is currently supported for both deneb and electra. This check + // does not advertise the topic on deneb, simply allows it to decode it. Advertise + // logic is in `SupportedTopic::currently_supported`. 
+ if fork_name.deneb_enabled() { + Ok(Some(RPCResponse::DataColumnsByRoot(Arc::new( + DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for data columns by root".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, + SupportedProtocol::DataColumnsByRangeV1 => match fork_name { + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RPCResponse::DataColumnsByRange(Arc::new( + DataColumnSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + "Invalid fork name for data columns by range".to_string(), + )) + } + } + None => Err(RPCError::ErrorResponse( + RPCResponseErrorCode::InvalidRequest, + format!( + "No context bytes provided for {:?} response", + versioned_protocol + ), + )), + }, SupportedProtocol::PingV1 => Ok(Some(RPCResponse::Pong(Ping { data: u64::from_ssz_bytes(decoded_buffer)?, }))), @@ -747,7 +813,8 @@ mod tests { use crate::types::{EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use types::{ blob_sidecar::BlobIdentifier, BeaconBlock, BeaconBlockAltair, BeaconBlockBase, - BeaconBlockBellatrix, EmptyBlock, Epoch, FullPayload, Signature, Slot, + BeaconBlockBellatrix, DataColumnIdentifier, EmptyBlock, Epoch, FullPayload, Signature, + Slot, }; type Spec = types::MainnetEthSpec; @@ -794,6 +861,10 @@ mod tests { Arc::new(BlobSidecar::empty()) } + fn empty_data_column_sidecar() -> Arc> { + Arc::new(DataColumnSidecar::empty()) + } + /// Bellatrix block with length < max_rpc_size. 
fn bellatrix_block_small( fork_context: &ForkContext, @@ -855,6 +926,27 @@ mod tests { } } + fn dcbrange_request() -> DataColumnsByRangeRequest { + DataColumnsByRangeRequest { + start_slot: 0, + count: 10, + columns: vec![1, 2, 3], + } + } + + fn dcbroot_request(spec: &ChainSpec) -> DataColumnsByRootRequest { + DataColumnsByRootRequest { + data_column_ids: RuntimeVariableList::new( + vec![DataColumnIdentifier { + block_root: Hash256::zero(), + index: 0, + }], + spec.max_request_data_column_sidecars as usize, + ) + .unwrap(), + } + } + fn bbroot_request_v1(spec: &ChainSpec) -> BlocksByRootRequest { BlocksByRootRequest::new_v1(vec![Hash256::zero()], spec) } @@ -1012,6 +1104,12 @@ mod tests { OutboundRequest::BlobsByRoot(bbroot) => { assert_eq!(decoded, InboundRequest::BlobsByRoot(bbroot)) } + OutboundRequest::DataColumnsByRoot(dcbroot) => { + assert_eq!(decoded, InboundRequest::DataColumnsByRoot(dcbroot)) + } + OutboundRequest::DataColumnsByRange(dcbrange) => { + assert_eq!(decoded, InboundRequest::DataColumnsByRange(dcbrange)) + } OutboundRequest::Ping(ping) => { assert_eq!(decoded, InboundRequest::Ping(ping)) } @@ -1138,6 +1236,34 @@ mod tests { ), Ok(Some(RPCResponse::BlobsByRoot(empty_blob_sidecar()))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRangeV1, + RPCCodedResponse::Success(RPCResponse::DataColumnsByRange( + empty_data_column_sidecar() + )), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::DataColumnsByRange( + empty_data_column_sidecar() + ))), + ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRootV1, + RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot( + empty_data_column_sidecar() + )), + ForkName::Deneb, + &chain_spec + ), + Ok(Some(RPCResponse::DataColumnsByRoot( + empty_data_column_sidecar() + ))), + ); } // Test RPCResponse encoding/decoding for V1 messages @@ -1491,6 +1617,8 @@ mod tests { OutboundRequest::MetaData(MetadataRequest::new_v1()), 
OutboundRequest::BlobsByRange(blbrange_request()), OutboundRequest::BlobsByRoot(blbroot_request(&chain_spec)), + OutboundRequest::DataColumnsByRange(dcbrange_request()), + OutboundRequest::DataColumnsByRoot(dcbroot_request(&chain_spec)), OutboundRequest::MetaData(MetadataRequest::new_v2()), ]; diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index d17fa112a1b..7ff189b9815 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -91,6 +91,8 @@ pub struct RateLimiterConfig { pub(super) blocks_by_root_quota: Quota, pub(super) blobs_by_range_quota: Quota, pub(super) blobs_by_root_quota: Quota, + pub(super) data_columns_by_root_quota: Quota, + pub(super) data_columns_by_range_quota: Quota, pub(super) light_client_bootstrap_quota: Quota, pub(super) light_client_optimistic_update_quota: Quota, pub(super) light_client_finality_update_quota: Quota, @@ -110,6 +112,12 @@ impl RateLimiterConfig { // measured against the maximum request size. pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(6144, 10); pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(768, 10); + // 320 blocks worth of columns for regular node, or 40 blocks for supernode. + // Range sync load balances when requesting blocks, and each batch is 32 blocks. + pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); + // 512 columns per request from spec. This should be plenty as peers are unlikely to send all + // sampling requests to a single peer. 
+ pub const DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); pub const DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA: Quota = Quota::one_every(10); pub const DEFAULT_LIGHT_CLIENT_FINALITY_UPDATE_QUOTA: Quota = Quota::one_every(10); @@ -126,6 +134,8 @@ impl Default for RateLimiterConfig { blocks_by_root_quota: Self::DEFAULT_BLOCKS_BY_ROOT_QUOTA, blobs_by_range_quota: Self::DEFAULT_BLOBS_BY_RANGE_QUOTA, blobs_by_root_quota: Self::DEFAULT_BLOBS_BY_ROOT_QUOTA, + data_columns_by_root_quota: Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA, + data_columns_by_range_quota: Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA, light_client_bootstrap_quota: Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA, light_client_optimistic_update_quota: Self::DEFAULT_LIGHT_CLIENT_OPTIMISTIC_UPDATE_QUOTA, @@ -175,6 +185,8 @@ impl FromStr for RateLimiterConfig { let mut blocks_by_root_quota = None; let mut blobs_by_range_quota = None; let mut blobs_by_root_quota = None; + let mut data_columns_by_root_quota = None; + let mut data_columns_by_range_quota = None; let mut light_client_bootstrap_quota = None; let mut light_client_optimistic_update_quota = None; let mut light_client_finality_update_quota = None; @@ -189,6 +201,12 @@ impl FromStr for RateLimiterConfig { Protocol::BlocksByRoot => blocks_by_root_quota = blocks_by_root_quota.or(quota), Protocol::BlobsByRange => blobs_by_range_quota = blobs_by_range_quota.or(quota), Protocol::BlobsByRoot => blobs_by_root_quota = blobs_by_root_quota.or(quota), + Protocol::DataColumnsByRoot => { + data_columns_by_root_quota = data_columns_by_root_quota.or(quota) + } + Protocol::DataColumnsByRange => { + data_columns_by_range_quota = data_columns_by_range_quota.or(quota) + } Protocol::Ping => ping_quota = ping_quota.or(quota), Protocol::MetaData => meta_data_quota = meta_data_quota.or(quota), Protocol::LightClientBootstrap => { @@ -216,6 +234,10 @@ impl FromStr for 
RateLimiterConfig { blobs_by_range_quota: blobs_by_range_quota .unwrap_or(Self::DEFAULT_BLOBS_BY_RANGE_QUOTA), blobs_by_root_quota: blobs_by_root_quota.unwrap_or(Self::DEFAULT_BLOBS_BY_ROOT_QUOTA), + data_columns_by_root_quota: data_columns_by_root_quota + .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_ROOT_QUOTA), + data_columns_by_range_quota: data_columns_by_range_quota + .unwrap_or(Self::DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA), light_client_bootstrap_quota: light_client_bootstrap_quota .unwrap_or(Self::DEFAULT_LIGHT_CLIENT_BOOTSTRAP_QUOTA), light_client_optimistic_update_quota: light_client_optimistic_update_quota diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 1b0486ff771..8849a5433d4 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -14,9 +14,9 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::{ - blob_sidecar::BlobSidecar, ChainSpec, Epoch, EthSpec, Hash256, LightClientBootstrap, - LightClientFinalityUpdate, LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, - Slot, + blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, + Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, + LightClientOptimisticUpdate, RuntimeVariableList, SignedBeaconBlock, Slot, }; /// Maximum length of error message. @@ -293,6 +293,43 @@ impl BlobsByRangeRequest { } } +/// Request a number of beacon data columns from a peer. +#[derive(Encode, Decode, Clone, Debug, PartialEq)] +pub struct DataColumnsByRangeRequest { + /// The starting slot to request data columns. + pub start_slot: u64, + /// The number of slots from the start slot. + pub count: u64, + /// The list column indices being requested. 
+ pub columns: Vec, +} + +impl DataColumnsByRangeRequest { + pub fn max_requested(&self) -> u64 { + self.count.saturating_mul(self.columns.len() as u64) + } + + pub fn ssz_min_len() -> usize { + DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + columns: vec![0], + } + .as_ssz_bytes() + .len() + } + + pub fn ssz_max_len(spec: &ChainSpec) -> usize { + DataColumnsByRangeRequest { + start_slot: 0, + count: 0, + columns: vec![0; spec.number_of_columns], + } + .as_ssz_bytes() + .len() + } +} + /// Request a number of beacon block roots from a peer. #[superstruct( variants(V1, V2), @@ -370,6 +407,27 @@ impl BlobsByRootRequest { } } +/// Request a number of data columns from a peer. +#[derive(Clone, Debug, PartialEq)] +pub struct DataColumnsByRootRequest { + /// The list of beacon block roots and column indices being requested. + pub data_column_ids: RuntimeVariableList, +} + +impl DataColumnsByRootRequest { + pub fn new(data_column_ids: Vec, spec: &ChainSpec) -> Self { + let data_column_ids = RuntimeVariableList::from_vec( + data_column_ids, + spec.max_request_data_column_sidecars as usize, + ); + Self { data_column_ids } + } + + pub fn new_single(block_root: Hash256, index: ColumnIndex, spec: &ChainSpec) -> Self { + Self::new(vec![DataColumnIdentifier { block_root, index }], spec) + } +} + /* RPC Handling and Grouping */ // Collection of enums and structs used by the Codecs to encode/decode RPC messages @@ -400,6 +458,12 @@ pub enum RPCResponse { /// A response to a get BLOBS_BY_ROOT request. BlobsByRoot(Arc>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. + DataColumnsByRoot(Arc>), + + /// A response to a get DATA_COLUMN_SIDECARS_BY_RANGE request. + DataColumnsByRange(Arc>), + /// A PONG response to a PING request. Pong(Ping), @@ -421,6 +485,12 @@ pub enum ResponseTermination { /// Blobs by root stream termination. BlobsByRoot, + + /// Data column sidecars by root stream termination. 
+ DataColumnsByRoot, + + /// Data column sidecars by range stream termination. + DataColumnsByRange, } /// The structured response containing a result/code indicating success or failure @@ -511,6 +581,8 @@ impl RPCResponse { RPCResponse::BlocksByRoot(_) => Protocol::BlocksByRoot, RPCResponse::BlobsByRange(_) => Protocol::BlobsByRange, RPCResponse::BlobsByRoot(_) => Protocol::BlobsByRoot, + RPCResponse::DataColumnsByRoot(_) => Protocol::DataColumnsByRoot, + RPCResponse::DataColumnsByRange(_) => Protocol::DataColumnsByRange, RPCResponse::Pong(_) => Protocol::Ping, RPCResponse::MetaData(_) => Protocol::MetaData, RPCResponse::LightClientBootstrap(_) => Protocol::LightClientBootstrap, @@ -556,6 +628,16 @@ impl std::fmt::Display for RPCResponse { RPCResponse::BlobsByRoot(sidecar) => { write!(f, "BlobsByRoot: Blob slot: {}", sidecar.slot()) } + RPCResponse::DataColumnsByRoot(sidecar) => { + write!(f, "DataColumnsByRoot: Data column slot: {}", sidecar.slot()) + } + RPCResponse::DataColumnsByRange(sidecar) => { + write!( + f, + "DataColumnsByRange: Data column slot: {}", + sidecar.slot() + ) + } RPCResponse::Pong(ping) => write!(f, "Pong: {}", ping.data), RPCResponse::MetaData(metadata) => write!(f, "Metadata: {}", metadata.seq_number()), RPCResponse::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 027af89edfa..666cbe6fbcc 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -471,6 +471,8 @@ where ResponseTermination::BlocksByRoot => Protocol::BlocksByRoot, ResponseTermination::BlobsByRange => Protocol::BlobsByRange, ResponseTermination::BlobsByRoot => Protocol::BlobsByRoot, + ResponseTermination::DataColumnsByRoot => Protocol::DataColumnsByRoot, + ResponseTermination::DataColumnsByRange => Protocol::DataColumnsByRange, }, ), }; diff --git a/beacon_node/lighthouse_network/src/rpc/outbound.rs 
b/beacon_node/lighthouse_network/src/rpc/outbound.rs index 8ea7b84bc95..7752d27e759 100644 --- a/beacon_node/lighthouse_network/src/rpc/outbound.rs +++ b/beacon_node/lighthouse_network/src/rpc/outbound.rs @@ -36,6 +36,8 @@ pub enum OutboundRequest { BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), BlobsByRoot(BlobsByRootRequest), + DataColumnsByRoot(DataColumnsByRootRequest), + DataColumnsByRange(DataColumnsByRangeRequest), Ping(Ping), MetaData(MetadataRequest), } @@ -79,6 +81,14 @@ impl OutboundRequest { SupportedProtocol::BlobsByRootV1, Encoding::SSZSnappy, )], + OutboundRequest::DataColumnsByRoot(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRootV1, + Encoding::SSZSnappy, + )], + OutboundRequest::DataColumnsByRange(_) => vec![ProtocolId::new( + SupportedProtocol::DataColumnsByRangeV1, + Encoding::SSZSnappy, + )], OutboundRequest::Ping(_) => vec![ProtocolId::new( SupportedProtocol::PingV1, Encoding::SSZSnappy, @@ -100,6 +110,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(req) => req.block_roots().len() as u64, OutboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), OutboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, + OutboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + OutboundRequest::DataColumnsByRange(req) => req.max_requested::(), OutboundRequest::Ping(_) => 1, OutboundRequest::MetaData(_) => 1, } @@ -113,6 +125,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => false, OutboundRequest::BlobsByRange(_) => false, OutboundRequest::BlobsByRoot(_) => false, + OutboundRequest::DataColumnsByRoot(_) => false, + OutboundRequest::DataColumnsByRange(_) => false, OutboundRequest::Ping(_) => true, OutboundRequest::MetaData(_) => true, } @@ -133,6 +147,8 @@ impl OutboundRequest { }, OutboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, OutboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + OutboundRequest::DataColumnsByRoot(_) => 
SupportedProtocol::DataColumnsByRootV1, + OutboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, OutboundRequest::Ping(_) => SupportedProtocol::PingV1, OutboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -151,6 +167,8 @@ impl OutboundRequest { OutboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, OutboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, OutboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + OutboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + OutboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, OutboundRequest::Status(_) => unreachable!(), OutboundRequest::Goodbye(_) => unreachable!(), OutboundRequest::Ping(_) => unreachable!(), @@ -208,6 +226,10 @@ impl std::fmt::Display for OutboundRequest { OutboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), OutboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), OutboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + OutboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + OutboundRequest::DataColumnsByRange(req) => { + write!(f, "Data columns by range: {:?}", req) + } OutboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), OutboundRequest::MetaData(_) => write!(f, "MetaData request"), } diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 2cdd730a2b0..6f7f0348345 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,10 +18,10 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BlobSidecar, ChainSpec, EmptyBlock, EthSpec, ForkContext, ForkName, LightClientBootstrap, - LightClientBootstrapAltair, 
LightClientFinalityUpdate, LightClientFinalityUpdateAltair, - LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, - SignedBeaconBlock, + BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, ForkContext, ForkName, + LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, + LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + LightClientOptimisticUpdateAltair, MainnetEthSpec, Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -268,6 +268,12 @@ pub enum Protocol { /// The `BlobsByRoot` protocol name. #[strum(serialize = "blob_sidecars_by_root")] BlobsByRoot, + /// The `DataColumnSidecarsByRoot` protocol name. + #[strum(serialize = "data_column_sidecars_by_root")] + DataColumnsByRoot, + /// The `DataColumnSidecarsByRange` protocol name. + #[strum(serialize = "data_column_sidecars_by_range")] + DataColumnsByRange, /// The `Ping` protocol name. Ping, /// The `MetaData` protocol name. 
@@ -293,6 +299,8 @@ impl Protocol { Protocol::BlocksByRoot => Some(ResponseTermination::BlocksByRoot), Protocol::BlobsByRange => Some(ResponseTermination::BlobsByRange), Protocol::BlobsByRoot => Some(ResponseTermination::BlobsByRoot), + Protocol::DataColumnsByRoot => Some(ResponseTermination::DataColumnsByRoot), + Protocol::DataColumnsByRange => Some(ResponseTermination::DataColumnsByRange), Protocol::Ping => None, Protocol::MetaData => None, Protocol::LightClientBootstrap => None, @@ -319,6 +327,8 @@ pub enum SupportedProtocol { BlocksByRootV2, BlobsByRangeV1, BlobsByRootV1, + DataColumnsByRootV1, + DataColumnsByRangeV1, PingV1, MetaDataV1, MetaDataV2, @@ -338,6 +348,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRootV2 => "2", SupportedProtocol::BlobsByRangeV1 => "1", SupportedProtocol::BlobsByRootV1 => "1", + SupportedProtocol::DataColumnsByRootV1 => "1", + SupportedProtocol::DataColumnsByRangeV1 => "1", SupportedProtocol::PingV1 => "1", SupportedProtocol::MetaDataV1 => "1", SupportedProtocol::MetaDataV2 => "2", @@ -357,6 +369,8 @@ impl SupportedProtocol { SupportedProtocol::BlocksByRootV2 => Protocol::BlocksByRoot, SupportedProtocol::BlobsByRangeV1 => Protocol::BlobsByRange, SupportedProtocol::BlobsByRootV1 => Protocol::BlobsByRoot, + SupportedProtocol::DataColumnsByRootV1 => Protocol::DataColumnsByRoot, + SupportedProtocol::DataColumnsByRangeV1 => Protocol::DataColumnsByRange, SupportedProtocol::PingV1 => Protocol::Ping, SupportedProtocol::MetaDataV1 => Protocol::MetaData, SupportedProtocol::MetaDataV2 => Protocol::MetaData, @@ -387,6 +401,12 @@ impl SupportedProtocol { ProtocolId::new(SupportedProtocol::BlobsByRangeV1, Encoding::SSZSnappy), ]); } + if fork_context.spec.is_peer_das_scheduled() { + supported.extend_from_slice(&[ + ProtocolId::new(SupportedProtocol::DataColumnsByRootV1, Encoding::SSZSnappy), + ProtocolId::new(SupportedProtocol::DataColumnsByRangeV1, Encoding::SSZSnappy), + ]); + } supported } } @@ -495,6 +515,11 @@ impl ProtocolId { 
::ssz_fixed_len(), ), Protocol::BlobsByRoot => RpcLimits::new(0, spec.max_blobs_by_root_request), + Protocol::DataColumnsByRoot => RpcLimits::new(0, spec.max_data_columns_by_root_request), + Protocol::DataColumnsByRange => RpcLimits::new( + DataColumnsByRangeRequest::ssz_min_len(), + DataColumnsByRangeRequest::ssz_max_len(spec), + ), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -521,6 +546,8 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), + Protocol::DataColumnsByRoot => rpc_data_column_limits::(), + Protocol::DataColumnsByRange => rpc_data_column_limits::(), Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -549,6 +576,8 @@ impl ProtocolId { | SupportedProtocol::BlocksByRootV2 | SupportedProtocol::BlobsByRangeV1 | SupportedProtocol::BlobsByRootV1 + | SupportedProtocol::DataColumnsByRootV1 + | SupportedProtocol::DataColumnsByRangeV1 | SupportedProtocol::LightClientBootstrapV1 | SupportedProtocol::LightClientOptimisticUpdateV1 | SupportedProtocol::LightClientFinalityUpdateV1 => true, @@ -589,6 +618,13 @@ pub fn rpc_blob_limits() -> RpcLimits { ) } +pub fn rpc_data_column_limits() -> RpcLimits { + RpcLimits::new( + DataColumnSidecar::::empty().as_ssz_bytes().len(), + DataColumnSidecar::::max_size(), + ) +} + /* Inbound upgrade */ // The inbound protocol reads the request, decodes it and returns the stream to the protocol @@ -668,6 +704,8 @@ pub enum InboundRequest { BlocksByRoot(BlocksByRootRequest), BlobsByRange(BlobsByRangeRequest), BlobsByRoot(BlobsByRootRequest), + DataColumnsByRoot(DataColumnsByRootRequest), + DataColumnsByRange(DataColumnsByRangeRequest), LightClientBootstrap(LightClientBootstrapRequest), LightClientOptimisticUpdate, LightClientFinalityUpdate, @@ -688,6 +726,8 @@ impl InboundRequest { InboundRequest::BlocksByRoot(req) => req.block_roots().len() 
as u64, InboundRequest::BlobsByRange(req) => req.max_blobs_requested::(), InboundRequest::BlobsByRoot(req) => req.blob_ids.len() as u64, + InboundRequest::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, + InboundRequest::DataColumnsByRange(req) => req.max_requested::(), InboundRequest::Ping(_) => 1, InboundRequest::MetaData(_) => 1, InboundRequest::LightClientBootstrap(_) => 1, @@ -711,6 +751,8 @@ impl InboundRequest { }, InboundRequest::BlobsByRange(_) => SupportedProtocol::BlobsByRangeV1, InboundRequest::BlobsByRoot(_) => SupportedProtocol::BlobsByRootV1, + InboundRequest::DataColumnsByRoot(_) => SupportedProtocol::DataColumnsByRootV1, + InboundRequest::DataColumnsByRange(_) => SupportedProtocol::DataColumnsByRangeV1, InboundRequest::Ping(_) => SupportedProtocol::PingV1, InboundRequest::MetaData(req) => match req { MetadataRequest::V1(_) => SupportedProtocol::MetaDataV1, @@ -736,6 +778,8 @@ impl InboundRequest { InboundRequest::BlocksByRoot(_) => ResponseTermination::BlocksByRoot, InboundRequest::BlobsByRange(_) => ResponseTermination::BlobsByRange, InboundRequest::BlobsByRoot(_) => ResponseTermination::BlobsByRoot, + InboundRequest::DataColumnsByRoot(_) => ResponseTermination::DataColumnsByRoot, + InboundRequest::DataColumnsByRange(_) => ResponseTermination::DataColumnsByRange, InboundRequest::Status(_) => unreachable!(), InboundRequest::Goodbye(_) => unreachable!(), InboundRequest::Ping(_) => unreachable!(), @@ -846,6 +890,10 @@ impl std::fmt::Display for InboundRequest { InboundRequest::BlocksByRoot(req) => write!(f, "Blocks by root: {:?}", req), InboundRequest::BlobsByRange(req) => write!(f, "Blobs by range: {:?}", req), InboundRequest::BlobsByRoot(req) => write!(f, "Blobs by root: {:?}", req), + InboundRequest::DataColumnsByRoot(req) => write!(f, "Data columns by root: {:?}", req), + InboundRequest::DataColumnsByRange(req) => { + write!(f, "Data columns by range: {:?}", req) + } InboundRequest::Ping(ping) => write!(f, "Ping: {}", ping.data), 
InboundRequest::MetaData(_) => write!(f, "MetaData request"), InboundRequest::LightClientBootstrap(bootstrap) => { diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index b304eb546da..9fb085efd86 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -97,6 +97,10 @@ pub struct RPCRateLimiter { blbrange_rl: Limiter, /// BlobsByRoot rate limiter. blbroot_rl: Limiter, + /// DataColumnssByRoot rate limiter. + dcbroot_rl: Limiter, + /// DataColumnsByRange rate limiter. + dcbrange_rl: Limiter, /// LightClientBootstrap rate limiter. lc_bootstrap_rl: Limiter, /// LightClientOptimisticUpdate rate limiter. @@ -133,6 +137,10 @@ pub struct RPCRateLimiterBuilder { blbrange_quota: Option, /// Quota for the BlobsByRoot protocol. blbroot_quota: Option, + /// Quota for the DataColumnsByRoot protocol. + dcbroot_quota: Option, + /// Quota for the DataColumnsByRange protocol. + dcbrange_quota: Option, /// Quota for the LightClientBootstrap protocol. lcbootstrap_quota: Option, /// Quota for the LightClientOptimisticUpdate protocol. 
@@ -154,6 +162,8 @@ impl RPCRateLimiterBuilder { Protocol::BlocksByRoot => self.bbroots_quota = q, Protocol::BlobsByRange => self.blbrange_quota = q, Protocol::BlobsByRoot => self.blbroot_quota = q, + Protocol::DataColumnsByRoot => self.dcbroot_quota = q, + Protocol::DataColumnsByRange => self.dcbrange_quota = q, Protocol::LightClientBootstrap => self.lcbootstrap_quota = q, Protocol::LightClientOptimisticUpdate => self.lc_optimistic_update_quota = q, Protocol::LightClientFinalityUpdate => self.lc_finality_update_quota = q, @@ -191,6 +201,14 @@ impl RPCRateLimiterBuilder { .blbroot_quota .ok_or("BlobsByRoot quota not specified")?; + let dcbroot_quota = self + .dcbroot_quota + .ok_or("DataColumnsByRoot quota not specified")?; + + let dcbrange_quota = self + .dcbrange_quota + .ok_or("DataColumnsByRange quota not specified")?; + // create the rate limiters let ping_rl = Limiter::from_quota(ping_quota)?; let metadata_rl = Limiter::from_quota(metadata_quota)?; @@ -200,6 +218,8 @@ impl RPCRateLimiterBuilder { let bbrange_rl = Limiter::from_quota(bbrange_quota)?; let blbrange_rl = Limiter::from_quota(blbrange_quota)?; let blbroot_rl = Limiter::from_quota(blbroots_quota)?; + let dcbroot_rl = Limiter::from_quota(dcbroot_quota)?; + let dcbrange_rl = Limiter::from_quota(dcbrange_quota)?; let lc_bootstrap_rl = Limiter::from_quota(lc_bootstrap_quota)?; let lc_optimistic_update_rl = Limiter::from_quota(lc_optimistic_update_quota)?; let lc_finality_update_rl = Limiter::from_quota(lc_finality_update_quota)?; @@ -218,6 +238,8 @@ impl RPCRateLimiterBuilder { bbrange_rl, blbrange_rl, blbroot_rl, + dcbroot_rl, + dcbrange_rl, lc_bootstrap_rl, lc_optimistic_update_rl, lc_finality_update_rl, @@ -262,6 +284,8 @@ impl RPCRateLimiter { blocks_by_root_quota, blobs_by_range_quota, blobs_by_root_quota, + data_columns_by_root_quota, + data_columns_by_range_quota, light_client_bootstrap_quota, light_client_optimistic_update_quota, light_client_finality_update_quota, @@ -276,6 +300,8 @@ impl 
RPCRateLimiter { .set_quota(Protocol::BlocksByRoot, blocks_by_root_quota) .set_quota(Protocol::BlobsByRange, blobs_by_range_quota) .set_quota(Protocol::BlobsByRoot, blobs_by_root_quota) + .set_quota(Protocol::DataColumnsByRoot, data_columns_by_root_quota) + .set_quota(Protocol::DataColumnsByRange, data_columns_by_range_quota) .set_quota(Protocol::LightClientBootstrap, light_client_bootstrap_quota) .set_quota( Protocol::LightClientOptimisticUpdate, @@ -312,6 +338,8 @@ impl RPCRateLimiter { Protocol::BlocksByRoot => &mut self.bbroots_rl, Protocol::BlobsByRange => &mut self.blbrange_rl, Protocol::BlobsByRoot => &mut self.blbroot_rl, + Protocol::DataColumnsByRoot => &mut self.dcbroot_rl, + Protocol::DataColumnsByRange => &mut self.dcbrange_rl, Protocol::LightClientBootstrap => &mut self.lc_bootstrap_rl, Protocol::LightClientOptimisticUpdate => &mut self.lc_optimistic_update_rl, Protocol::LightClientFinalityUpdate => &mut self.lc_finality_update_rl, diff --git a/beacon_node/lighthouse_network/src/service/api_types.rs b/beacon_node/lighthouse_network/src/service/api_types.rs index 376ac34dee7..11df591ae4c 100644 --- a/beacon_node/lighthouse_network/src/service/api_types.rs +++ b/beacon_node/lighthouse_network/src/service/api_types.rs @@ -2,11 +2,13 @@ use std::sync::Arc; use libp2p::swarm::ConnectionId; use types::{ - BlobSidecar, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, + BlobSidecar, DataColumnSidecar, EthSpec, LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, SignedBeaconBlock, }; -use crate::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use crate::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use crate::rpc::{ methods::{ BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, @@ -27,6 +29,11 @@ pub struct SingleLookupReqId { pub req_id: Id, } +/// Request ID for data_columns_by_root requests. 
Block lookup do not issue this requests directly. +/// Wrapping this particular req_id, ensures not mixing this requests with a custody req_id. +#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] +pub struct DataColumnsByRootRequestId(pub Id); + /// Id of rpc requests sent by sync to the network. #[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)] pub enum SyncRequestId { @@ -34,6 +41,8 @@ pub enum SyncRequestId { SingleBlock { id: SingleLookupReqId }, /// Request searching for a set of blobs given a hash. SingleBlob { id: SingleLookupReqId }, + /// Request searching for a set of data columns given a hash and list of column indices. + DataColumnsByRoot(DataColumnsByRootRequestId, SingleLookupReqId), /// Range request that is composed by both a block range request and a blob range request. RangeBlockAndBlobs { id: Id }, } @@ -75,6 +84,10 @@ pub enum Request { LightClientFinalityUpdate, /// A request blobs root request. BlobsByRoot(BlobsByRootRequest), + /// A request data columns root request. + DataColumnsByRoot(DataColumnsByRootRequest), + /// A request data columns by range request. + DataColumnsByRange(DataColumnsByRangeRequest), } impl std::convert::From for OutboundRequest { @@ -104,6 +117,8 @@ impl std::convert::From for OutboundRequest { } Request::BlobsByRange(r) => OutboundRequest::BlobsByRange(r), Request::BlobsByRoot(r) => OutboundRequest::BlobsByRoot(r), + Request::DataColumnsByRoot(r) => OutboundRequest::DataColumnsByRoot(r), + Request::DataColumnsByRange(r) => OutboundRequest::DataColumnsByRange(r), Request::Status(s) => OutboundRequest::Status(s), } } @@ -123,10 +138,14 @@ pub enum Response { BlocksByRange(Option>>), /// A response to a get BLOBS_BY_RANGE request. A None response signals the end of the batch. BlobsByRange(Option>>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_Range request. + DataColumnsByRange(Option>>), /// A response to a get BLOCKS_BY_ROOT request. BlocksByRoot(Option>>), /// A response to a get BLOBS_BY_ROOT request. 
BlobsByRoot(Option>>), + /// A response to a get DATA_COLUMN_SIDECARS_BY_ROOT request. + DataColumnsByRoot(Option>>), /// A response to a LightClientUpdate request. LightClientBootstrap(Arc>), /// A response to a LightClientOptimisticUpdate request. @@ -154,6 +173,16 @@ impl std::convert::From> for RPCCodedResponse { Some(b) => RPCCodedResponse::Success(RPCResponse::BlobsByRange(b)), None => RPCCodedResponse::StreamTermination(ResponseTermination::BlobsByRange), }, + Response::DataColumnsByRoot(r) => match r { + Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRoot(d)), + None => RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRoot), + }, + Response::DataColumnsByRange(r) => match r { + Some(d) => RPCCodedResponse::Success(RPCResponse::DataColumnsByRange(d)), + None => { + RPCCodedResponse::StreamTermination(ResponseTermination::DataColumnsByRange) + } + }, Response::Status(s) => RPCCodedResponse::Success(RPCResponse::Status(s)), Response::LightClientBootstrap(b) => { RPCCodedResponse::Success(RPCResponse::LightClientBootstrap(b)) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index fe649f4199a..4ef080619eb 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1204,6 +1204,12 @@ impl Network { Request::BlobsByRoot { .. } => { metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["blobs_by_root"]) } + Request::DataColumnsByRoot { .. } => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_root"]) + } + Request::DataColumnsByRange { .. 
} => { + metrics::inc_counter_vec(&metrics::TOTAL_RPC_REQUESTS, &["data_columns_by_range"]) + } } NetworkEvent::RequestReceived { peer_id, @@ -1523,6 +1529,22 @@ impl Network { self.build_request(peer_request_id, peer_id, Request::BlobsByRoot(req)); Some(event) } + InboundRequest::DataColumnsByRoot(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::DataColumnsByRoot(req), + ); + Some(event) + } + InboundRequest::DataColumnsByRange(req) => { + let event = self.build_request( + peer_request_id, + peer_id, + Request::DataColumnsByRange(req), + ); + Some(event) + } InboundRequest::LightClientBootstrap(req) => { let event = self.build_request( peer_request_id, @@ -1580,6 +1602,12 @@ impl Network { RPCResponse::BlobsByRoot(resp) => { self.build_response(id, peer_id, Response::BlobsByRoot(Some(resp))) } + RPCResponse::DataColumnsByRoot(resp) => { + self.build_response(id, peer_id, Response::DataColumnsByRoot(Some(resp))) + } + RPCResponse::DataColumnsByRange(resp) => { + self.build_response(id, peer_id, Response::DataColumnsByRange(Some(resp))) + } // Should never be reached RPCResponse::LightClientBootstrap(bootstrap) => { self.build_response(id, peer_id, Response::LightClientBootstrap(bootstrap)) @@ -1602,6 +1630,8 @@ impl Network { ResponseTermination::BlocksByRoot => Response::BlocksByRoot(None), ResponseTermination::BlobsByRange => Response::BlobsByRange(None), ResponseTermination::BlobsByRoot => Response::BlobsByRoot(None), + ResponseTermination::DataColumnsByRoot => Response::DataColumnsByRoot(None), + ResponseTermination::DataColumnsByRange => Response::DataColumnsByRange(None), }; self.build_response(id, peer_id, response) } diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index ffb01a99efb..9fb14fdcb8c 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -8,7 +8,9 @@ use 
beacon_processor::{ DuplicateCache, GossipAggregatePackage, GossipAttestationPackage, Work, WorkEvent as BeaconWorkEvent, }; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use lighthouse_network::{ rpc::{BlocksByRangeRequest, BlocksByRootRequest, LightClientBootstrapRequest, StatusMessage}, Client, MessageId, NetworkGlobals, PeerId, PeerRequestId, @@ -602,6 +604,40 @@ impl NetworkBeaconProcessor { }) } + /// Create a new work event to process `DataColumnsByRootRequest`s from the RPC network. + pub fn send_data_columns_by_roots_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_data_columns_by_root_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::DataColumnsByRootsRequest(Box::new(process_fn)), + }) + } + + /// Create a new work event to process `DataColumnsByRange`s from the RPC network. + pub fn send_data_columns_by_range_request( + self: &Arc, + peer_id: PeerId, + request_id: PeerRequestId, + request: DataColumnsByRangeRequest, + ) -> Result<(), Error> { + let processor = self.clone(); + let process_fn = + move || processor.handle_data_columns_by_range_request(peer_id, request_id, request); + + self.try_send(BeaconWorkEvent { + drop_during_sync: false, + work: Work::DataColumnsByRangeRequest(Box::new(process_fn)), + }) + } + /// Create a new work event to process `LightClientBootstrap`s from the RPC network. 
pub fn send_light_client_bootstrap_request( self: &Arc, diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index 2a0c7ea089b..3f8cf14dcbe 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -4,7 +4,9 @@ use crate::status::ToStatusMessage; use crate::sync::SyncMessage; use beacon_chain::{BeaconChainError, BeaconChainTypes, HistoricalBlockError, WhenSlotSkipped}; use itertools::process_results; -use lighthouse_network::rpc::methods::{BlobsByRangeRequest, BlobsByRootRequest}; +use lighthouse_network::rpc::methods::{ + BlobsByRangeRequest, BlobsByRootRequest, DataColumnsByRangeRequest, DataColumnsByRootRequest, +}; use lighthouse_network::rpc::*; use lighthouse_network::{PeerId, PeerRequestId, ReportSource, Response, SyncInfo}; use slog::{debug, error, warn}; @@ -314,6 +316,20 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `DataColumnsByRoot` request from the peer. + pub fn handle_data_columns_by_root_request( + self: Arc, + peer_id: PeerId, + _request_id: PeerRequestId, + request: DataColumnsByRootRequest, + ) { + // TODO(das): implement handler + debug!(self.log, "Received DataColumnsByRoot Request"; + "peer_id" => %peer_id, + "count" => request.data_column_ids.len() + ); + } + /// Handle a `LightClientBootstrap` request from the peer. pub fn handle_light_client_bootstrap( self: &Arc, @@ -815,6 +831,21 @@ impl NetworkBeaconProcessor { Ok(()) } + /// Handle a `DataColumnsByRange` request from the peer. 
+ pub fn handle_data_columns_by_range_request( + self: Arc, + peer_id: PeerId, + _request_id: PeerRequestId, + req: DataColumnsByRangeRequest, + ) { + // TODO(das): implement handler + debug!(self.log, "Received DataColumnsByRange Request"; + "peer_id" => %peer_id, + "count" => req.count, + "start_slot" => req.start_slot, + ); + } + /// Helper function to ensure single item protocol always end with either a single chunk or an /// error fn terminate_response_single_item Response>( diff --git a/beacon_node/network/src/router.rs b/beacon_node/network/src/router.rs index c162d52d026..a5e27f582af 100644 --- a/beacon_node/network/src/router.rs +++ b/beacon_node/network/src/router.rs @@ -27,7 +27,7 @@ use std::sync::Arc; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use tokio::sync::mpsc; use tokio_stream::wrappers::UnboundedReceiverStream; -use types::{BlobSidecar, EthSpec, SignedBeaconBlock}; +use types::{BlobSidecar, DataColumnSidecar, EthSpec, SignedBeaconBlock}; /// Handles messages from the network and routes them to the appropriate service to be handled. 
pub struct Router { @@ -216,6 +216,14 @@ impl Router { self.network_beacon_processor .send_blobs_by_roots_request(peer_id, request_id, request), ), + Request::DataColumnsByRoot(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_data_columns_by_roots_request(peer_id, request_id, request), + ), + Request::DataColumnsByRange(request) => self.handle_beacon_processor_send_result( + self.network_beacon_processor + .send_data_columns_by_range_request(peer_id, request_id, request), + ), Request::LightClientBootstrap(request) => self.handle_beacon_processor_send_result( self.network_beacon_processor .send_light_client_bootstrap_request(peer_id, request_id, request), @@ -258,6 +266,12 @@ impl Router { Response::BlobsByRoot(blob) => { self.on_blobs_by_root_response(peer_id, request_id, blob); } + Response::DataColumnsByRoot(data_column) => { + self.on_data_columns_by_root_response(peer_id, request_id, data_column); + } + Response::DataColumnsByRange(data_column) => { + self.on_data_columns_by_range_response(peer_id, request_id, data_column); + } // Light client responses should not be received Response::LightClientBootstrap(_) | Response::LightClientOptimisticUpdate(_) @@ -507,11 +521,11 @@ impl Router { ) { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { - SyncRequestId::SingleBlock { .. } | SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Block lookups do not request BBRange requests"; "peer_id" => %peer_id); + id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, + other => { + crit!(self.log, "BlocksByRange response on incorrect request"; "request" => ?other); return; } - id @ SyncRequestId::RangeBlockAndBlobs { .. } => id, }, AppRequestId::Router => { crit!(self.log, "All BBRange requests belong to sync"; "peer_id" => %peer_id); @@ -570,12 +584,8 @@ impl Router { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlock { .. 
} => id, - SyncRequestId::RangeBlockAndBlobs { .. } => { - crit!(self.log, "Batch syncing do not request BBRoot requests"; "peer_id" => %peer_id); - return; - } - SyncRequestId::SingleBlob { .. } => { - crit!(self.log, "Blob response to block by roots request"; "peer_id" => %peer_id); + other => { + crit!(self.log, "BlocksByRoot response on incorrect request"; "request" => ?other); return; } }, @@ -608,12 +618,8 @@ impl Router { let request_id = match request_id { AppRequestId::Sync(sync_id) => match sync_id { id @ SyncRequestId::SingleBlob { .. } => id, - SyncRequestId::SingleBlock { .. } => { - crit!(self.log, "Block response to blobs by roots request"; "peer_id" => %peer_id); - return; - } - SyncRequestId::RangeBlockAndBlobs { .. } => { - crit!(self.log, "Batch syncing does not request BBRoot requests"; "peer_id" => %peer_id); + other => { + crit!(self.log, "BlobsByRoot response on incorrect request"; "request" => ?other); return; } }, @@ -636,6 +642,67 @@ impl Router { }); } + /// Handle a `DataColumnsByRoot` response from the peer. + pub fn on_data_columns_by_root_response( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + data_column: Option>>, + ) { + let request_id = match request_id { + AppRequestId::Sync(sync_id) => match sync_id { + id @ SyncRequestId::DataColumnsByRoot { .. 
} => id, + other => { + crit!(self.log, "DataColumnsByRoot response on incorrect request"; "request" => ?other); + return; + } + }, + AppRequestId::Router => { + crit!(self.log, "All DataColumnsByRoot requests belong to sync"; "peer_id" => %peer_id); + return; + } + }; + + trace!( + self.log, + "Received DataColumnsByRoot Response"; + "peer" => %peer_id, + ); + self.send_to_sync(SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column, + seen_timestamp: timestamp_now(), + }); + } + + pub fn on_data_columns_by_range_response( + &mut self, + peer_id: PeerId, + request_id: AppRequestId, + data_column: Option>>, + ) { + trace!( + self.log, + "Received DataColumnsByRange Response"; + "peer" => %peer_id, + ); + + if let AppRequestId::Sync(id) = request_id { + self.send_to_sync(SyncMessage::RpcDataColumn { + peer_id, + request_id: id, + data_column, + seen_timestamp: timestamp_now(), + }); + } else { + crit!( + self.log, + "All data columns by range responses should belong to sync" + ); + } + } + fn handle_beacon_processor_send_result( &mut self, result: Result<(), crate::network_beacon_processor::Error>, diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 7149395839b..e8e6896cd69 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -101,6 +101,14 @@ pub enum SyncMessage { seen_timestamp: Duration, }, + /// A data columns has been received from the RPC + RpcDataColumn { + request_id: SyncRequestId, + peer_id: PeerId, + data_column: Option>>, + seen_timestamp: Duration, + }, + /// A block with an unknown parent has been received. UnknownParentBlock(PeerId, RpcBlock, Hash256), @@ -337,6 +345,9 @@ impl SyncManager { SyncRequestId::SingleBlob { id } => { self.on_single_blob_response(id, peer_id, RpcEvent::RPCError(error)) } + SyncRequestId::DataColumnsByRoot { .. 
} => { + // TODO(das) + } SyncRequestId::RangeBlockAndBlobs { id } => { if let Some(sender_id) = self.network.range_request_failed(id) { match sender_id { @@ -614,6 +625,12 @@ impl SyncManager { blob_sidecar, seen_timestamp, } => self.rpc_blob_received(request_id, peer_id, blob_sidecar, seen_timestamp), + SyncMessage::RpcDataColumn { + request_id, + peer_id, + data_column, + seen_timestamp, + } => self.rpc_data_column_received(request_id, peer_id, data_column, seen_timestamp), SyncMessage::UnknownParentBlock(peer_id, block, block_root) => { let block_slot = block.slot(); let parent_root = block.parent_root(); @@ -846,6 +863,9 @@ impl SyncManager { SyncRequestId::SingleBlob { .. } => { crit!(self.log, "Block received during blob request"; "peer_id" => %peer_id ); } + SyncRequestId::DataColumnsByRoot { .. } => { + // TODO(das) + } SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, block.into()) } @@ -888,12 +908,25 @@ impl SyncManager { None => RpcEvent::StreamTermination, }, ), + SyncRequestId::DataColumnsByRoot { .. } => { + // TODO(das) + } SyncRequestId::RangeBlockAndBlobs { id } => { self.range_block_and_blobs_response(id, peer_id, blob.into()) } } } + fn rpc_data_column_received( + &mut self, + _request_id: SyncRequestId, + _peer_id: PeerId, + _data_column: Option>>, + _seen_timestamp: Duration, + ) { + // TODO(das): implement handler + } + fn on_single_blob_response( &mut self, id: SingleLookupReqId, From 6dc614fede50be066f7db7e1289c7fa7275c8b0f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 13 Aug 2024 10:16:17 +1000 Subject: [PATCH 17/18] Add PeerDAS KZG lib integration (construction & KZG verification) (#6212) * Add peerdas KZG library and use it for data column construction and cell kzg verification (#5701, #5941, #6118, #6179) Co-authored-by: kevaundray * Update `rust_eth_kzg` crate to published version. * Update kzg metrics buckets. 
* Merge branch 'unstable' into peerdas-kzg * Update KZG version to fix windows mem allocation. * Refactor common logic from build sidecar and reconstruction. Remove unnecessary `needless_lifetimes`. Co-authored-by: realbigsean * Copy existing trusted setup into `PeerDASTrustedSetup` for consistency and maintain `--trusted-setup` functionality. * Merge branch 'unstable' into peerdas-kzg * Merge branch 'peerdas-kzg' of github.com:jimmygchen/lighthouse into peerdas-kzg * Merge branch 'unstable' into peerdas-kzg * Merge branch 'unstable' into peerdas-kzg * Load PeerDAS KZG only if PeerDAS is enabled. --- Cargo.lock | 93 +++++ Cargo.toml | 1 + .../src/data_column_verification.rs | 16 +- beacon_node/beacon_chain/src/kzg_utils.rs | 328 +++++++++++++++++- beacon_node/beacon_chain/src/metrics.rs | 27 ++ beacon_node/client/src/builder.rs | 19 +- consensus/types/src/data_column_sidecar.rs | 212 +---------- crypto/kzg/Cargo.toml | 10 + crypto/kzg/benches/benchmark.rs | 31 ++ crypto/kzg/src/lib.rs | 151 ++++---- crypto/kzg/src/trusted_setup.rs | 23 ++ 11 files changed, 627 insertions(+), 284 deletions(-) create mode 100644 crypto/kzg/benches/benchmark.rs diff --git a/Cargo.lock b/Cargo.lock index 9afb3635f12..df005da6961 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1029,6 +1029,22 @@ dependencies = [ "zeroize", ] +[[package]] +name = "blstrs" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a8a8ed6fefbeef4a8c7b460e4110e12c5e22a5b7cf32621aae6ad650c4dcf29" +dependencies = [ + "blst", + "byte-slice-cast", + "ff 0.13.0", + "group 0.13.0", + "pairing", + "rand_core", + "serde", + "subtle", +] + [[package]] name = "bollard-stubs" version = "1.42.0-rc.3" @@ -1514,6 +1530,52 @@ dependencies = [ "libc", ] +[[package]] +name = "crate_crypto_internal_eth_kzg_bls12_381" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8761b04feb6031ffaf93933c955a0c91a2f3ce15dcac6b9586d2487fe55abf0b" 
+dependencies = [ + "blst", + "blstrs", + "ff 0.13.0", + "group 0.13.0", + "pairing", + "rayon", +] + +[[package]] +name = "crate_crypto_internal_eth_kzg_erasure_codes" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca410dff79524a2babe8a0d9ab5fdce21b16808f8189eb8b6da6159681f8de2" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_polynomial", +] + +[[package]] +name = "crate_crypto_internal_eth_kzg_polynomial" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68be1a5f16bc1c09254dec5209e22278d7d395284443576886a5890e7131234f" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", +] + +[[package]] +name = "crate_crypto_kzg_multi_open_fk20" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702fe5b687fe8c5a46851b8bc624ad49603a339dc93c920d4f7e61592c201ee8" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_polynomial", + "hex", + "rayon", + "sha2 0.10.8", +] + [[package]] name = "crc32fast" version = "1.4.2" @@ -3001,6 +3063,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ + "bitvec 1.0.1", "rand_core", "subtle", ] @@ -3431,7 +3494,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", + "rand", "rand_core", + "rand_xorshift", "subtle", ] @@ -4342,13 +4407,17 @@ version = "0.1.0" dependencies = [ "arbitrary", "c-kzg", + "criterion", "derivative", + "eth2_network_config", "ethereum_hashing", "ethereum_serde_utils", "ethereum_ssz", "ethereum_ssz_derive", "hex", + "rust_eth_kzg", "serde", + "serde_json", "tree_hash", ] @@ -5897,6 +5966,15 @@ dependencies = [ "sha2 0.10.8", ] 
+[[package]] +name = "pairing" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81fec4625e73cf41ef4bb6846cafa6d44736525f442ba45e407c4a000a13996f" +dependencies = [ + "group 0.13.0", +] + [[package]] name = "parity-scale-codec" version = "2.3.1" @@ -7015,6 +7093,21 @@ dependencies = [ "smallvec", ] +[[package]] +name = "rust_eth_kzg" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "013a850c7e131a8f9651ffbb151dc33240234f21dd357b692bd5ff4cdc84bf9a" +dependencies = [ + "crate_crypto_internal_eth_kzg_bls12_381", + "crate_crypto_internal_eth_kzg_erasure_codes", + "crate_crypto_kzg_multi_open_fk20", + "hex", + "rayon", + "serde", + "serde_json", +] + [[package]] name = "rustc-demangle" version = "0.1.24" diff --git a/Cargo.toml b/Cargo.toml index cf3fd0ab043..901fff2ea60 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -114,6 +114,7 @@ delay_map = "0.3" derivative = "2" dirs = "3" either = "1.9" +rust_eth_kzg = "0.3.4" discv5 = { version = "0.4.1", features = ["libp2p"] } env_logger = "0.9" error-chain = "0.12" diff --git a/beacon_node/beacon_chain/src/data_column_verification.rs b/beacon_node/beacon_chain/src/data_column_verification.rs index fa31d6f2e8e..da639e3695e 100644 --- a/beacon_node/beacon_chain/src/data_column_verification.rs +++ b/beacon_node/beacon_chain/src/data_column_verification.rs @@ -2,7 +2,8 @@ use crate::block_verification::{ cheap_state_advance_to_obtain_committees, get_validator_pubkey_cache, process_block_slash_info, BlockSlashInfo, }; -use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use crate::kzg_utils::validate_data_columns; +use crate::{metrics, BeaconChain, BeaconChainError, BeaconChainTypes}; use derivative::Derivative; use fork_choice::ProtoBlock; use kzg::{Error as KzgError, Kzg}; @@ -11,6 +12,7 @@ use slasher::test_utils::E; use slog::debug; use slot_clock::SlotClock; use ssz_derive::{Decode, Encode}; +use std::iter; use 
std::sync::Arc; use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier}; use types::{ @@ -255,9 +257,10 @@ impl KzgVerifiedCustodyDataColumn { /// Returns an error if the kzg verification check fails. pub fn verify_kzg_for_data_column( data_column: Arc>, - _kzg: &Kzg, + kzg: &Kzg, ) -> Result, KzgError> { - // TODO(das): KZG verification to be implemented + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES); + validate_data_columns(kzg, iter::once(&data_column))?; Ok(KzgVerifiedDataColumn { data: data_column }) } @@ -267,13 +270,14 @@ pub fn verify_kzg_for_data_column( /// Note: This function should be preferred over calling `verify_kzg_for_data_column` /// in a loop since this function kzg verifies a list of data columns more efficiently. pub fn verify_kzg_for_data_column_list<'a, E: EthSpec, I>( - _data_column_iter: I, - _kzg: &'a Kzg, + data_column_iter: I, + kzg: &'a Kzg, ) -> Result<(), KzgError> where I: Iterator>> + Clone, { - // TODO(das): implement KZG verification + let _timer = metrics::start_timer(&metrics::KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES); + validate_data_columns(kzg, data_column_iter)?; Ok(()) } diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index b554133875a..55c1ee9e980 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -1,5 +1,15 @@ -use kzg::{Blob as KzgBlob, Error as KzgError, Kzg}; -use types::{Blob, EthSpec, Hash256, KzgCommitment, KzgProof}; +use kzg::{ + Blob as KzgBlob, Bytes48, CellRef as KzgCellRef, CellsAndKzgProofs, Error as KzgError, Kzg, +}; +use rayon::prelude::*; +use ssz_types::FixedVector; +use std::sync::Arc; +use types::beacon_block_body::KzgCommitments; +use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; +use types::{ + Blob, BlobsList, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, + Hash256, KzgCommitment, 
KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, +}; /// Converts a blob ssz List object to an array to be used with the kzg /// crypto library. @@ -7,6 +17,15 @@ fn ssz_blob_to_crypto_blob(blob: &Blob) -> Result(cell: &Cell) -> Result { + let cell_bytes: &[u8] = cell.as_ref(); + Ok(cell_bytes + .try_into() + .expect("expected cell to have size {BYTES_PER_CELL}. This should be guaranteed by the `FixedVector type")) +} + /// Validate a single blob-commitment-proof triplet from a `BlobSidecar`. pub fn validate_blob( kzg: &Kzg, @@ -19,6 +38,50 @@ pub fn validate_blob( kzg.verify_blob_kzg_proof(&kzg_blob, kzg_commitment, kzg_proof) } +/// Validate a batch of `DataColumnSidecar`. +pub fn validate_data_columns<'a, E: EthSpec, I>( + kzg: &Kzg, + data_column_iter: I, +) -> Result<(), KzgError> +where + I: Iterator>> + Clone, +{ + let cells = data_column_iter + .clone() + .flat_map(|data_column| data_column.column.iter().map(ssz_cell_to_crypto_cell::)) + .collect::, KzgError>>()?; + + let proofs = data_column_iter + .clone() + .flat_map(|data_column| { + data_column + .kzg_proofs + .iter() + .map(|&proof| Bytes48::from(proof)) + }) + .collect::>(); + + let column_indices = data_column_iter + .clone() + .flat_map(|data_column| { + let col_index = data_column.index; + data_column.column.iter().map(move |_| col_index) + }) + .collect::>(); + + let commitments = data_column_iter + .clone() + .flat_map(|data_column| { + data_column + .kzg_commitments + .iter() + .map(|&commitment| Bytes48::from(commitment)) + }) + .collect::>(); + + kzg.verify_cell_proof_batch(&cells, &proofs, column_indices, &commitments) +} + /// Validate a batch of blob-commitment-proof triplets from multiple `BlobSidecars`. pub fn validate_blobs( kzg: &Kzg, @@ -76,3 +139,264 @@ pub fn verify_kzg_proof( ) -> Result { kzg.verify_kzg_proof(kzg_commitment, &z.0.into(), &y.0.into(), kzg_proof) } + +/// Build data column sidecars from a signed beacon block and its blobs. 
+pub fn blobs_to_data_column_sidecars( + blobs: &BlobsList, + block: &SignedBeaconBlock, + kzg: &Kzg, + spec: &ChainSpec, +) -> Result, DataColumnSidecarError> { + if blobs.is_empty() { + return Ok(vec![]); + } + let kzg_commitments = block + .message() + .body() + .blob_kzg_commitments() + .map_err(|_err| DataColumnSidecarError::PreDeneb)?; + let kzg_commitments_inclusion_proof = block.message().body().kzg_commitments_merkle_proof()?; + let signed_block_header = block.signed_block_header(); + + // NOTE: assumes blob sidecars are ordered by index + let blob_cells_and_proofs_vec = blobs + .into_par_iter() + .map(|blob| { + let blob = blob + .as_ref() + .try_into() + .expect("blob should have a guaranteed size due to FixedVector"); + kzg.compute_cells_and_proofs(blob) + }) + .collect::, KzgError>>()?; + + build_data_column_sidecars( + kzg_commitments.clone(), + kzg_commitments_inclusion_proof, + signed_block_header, + blob_cells_and_proofs_vec, + spec, + ) + .map_err(DataColumnSidecarError::BuildSidecarFailed) +} + +fn build_data_column_sidecars( + kzg_commitments: KzgCommitments, + kzg_commitments_inclusion_proof: FixedVector, + signed_block_header: SignedBeaconBlockHeader, + blob_cells_and_proofs_vec: Vec, + spec: &ChainSpec, +) -> Result, String> { + let number_of_columns = spec.number_of_columns; + let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let mut column_kzg_proofs = + vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + + for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { + // we iterate over each column, and we construct the column from "top to bottom", + // pushing on the cell and the corresponding proof at each column index. we do this for + // each blob (i.e. the outer loop). 
+ for col in 0..number_of_columns { + let cell = blob_cells + .get(col) + .ok_or(format!("Missing blob cell at index {col}"))?; + let cell: Vec = cell.to_vec(); + let cell = Cell::::from(cell); + + let proof = blob_cell_proofs + .get(col) + .ok_or(format!("Missing blob cell KZG proof at index {col}"))?; + + let column = columns + .get_mut(col) + .ok_or(format!("Missing data column at index {col}"))?; + let column_proofs = column_kzg_proofs + .get_mut(col) + .ok_or(format!("Missing data column proofs at index {col}"))?; + + column.push(cell); + column_proofs.push(*proof); + } + } + + let sidecars: Vec>> = columns + .into_iter() + .zip(column_kzg_proofs) + .enumerate() + .map(|(index, (col, proofs))| { + Arc::new(DataColumnSidecar { + index: index as u64, + column: DataColumn::::from(col), + kzg_commitments: kzg_commitments.clone(), + kzg_proofs: KzgProofs::::from(proofs), + signed_block_header: signed_block_header.clone(), + kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), + }) + }) + .collect(); + + Ok(sidecars) +} + +/// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
+pub fn reconstruct_data_columns( + kzg: &Kzg, + data_columns: &[Arc>], + spec: &ChainSpec, +) -> Result, KzgError> { + let first_data_column = data_columns + .first() + .ok_or(KzgError::InconsistentArrayLength( + "data_columns should have at least one element".to_string(), + ))?; + let num_of_blobs = first_data_column.kzg_commitments.len(); + + let blob_cells_and_proofs_vec = + (0..num_of_blobs) + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column.column.get(row_index).ok_or( + KzgError::InconsistentArrayLength(format!( + "Missing data column at index {row_index}" + )), + )?; + + cells.push(ssz_cell_to_crypto_cell::(cell)?); + cell_ids.push(data_column.index); + } + kzg.recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + }) + .collect::, KzgError>>()?; + + // Clone sidecar elements from existing data column, no need to re-compute + build_data_column_sidecars( + first_data_column.kzg_commitments.clone(), + first_data_column.kzg_commitments_inclusion_proof.clone(), + first_data_column.signed_block_header.clone(), + blob_cells_and_proofs_vec, + spec, + ) + .map_err(KzgError::ReconstructFailed) +} + +#[cfg(test)] +mod test { + use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use bls::Signature; + use eth2_network_config::TRUSTED_SETUP_BYTES; + use kzg::{Kzg, KzgCommitment, TrustedSetup}; + use types::{ + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, + ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + }; + + type E = MainnetEthSpec; + + // Loading and initializing PeerDAS KZG is expensive and slow, so we group the tests together + // only load it once. 
+ #[test] + fn test_build_data_columns_sidecars() { + let spec = E::default_spec(); + let kzg = get_kzg(); + test_build_data_columns_empty(&kzg, &spec); + test_build_data_columns(&kzg, &spec); + test_reconstruct_data_columns(&kzg, &spec); + } + + #[track_caller] + fn test_build_data_columns_empty(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 0; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + assert!(column_sidecars.is_empty()); + } + + #[track_caller] + fn test_build_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, &signed_block, kzg, spec).unwrap(); + + let block_kzg_commitments = signed_block + .message() + .body() + .blob_kzg_commitments() + .unwrap() + .clone(); + let block_kzg_commitments_inclusion_proof = signed_block + .message() + .body() + .kzg_commitments_merkle_proof() + .unwrap(); + + assert_eq!(column_sidecars.len(), spec.number_of_columns); + for (idx, col_sidecar) in column_sidecars.iter().enumerate() { + assert_eq!(col_sidecar.index, idx as u64); + + assert_eq!(col_sidecar.kzg_commitments.len(), num_of_blobs); + assert_eq!(col_sidecar.column.len(), num_of_blobs); + assert_eq!(col_sidecar.kzg_proofs.len(), num_of_blobs); + + assert_eq!(col_sidecar.kzg_commitments, block_kzg_commitments); + assert_eq!( + col_sidecar.kzg_commitments_inclusion_proof, + block_kzg_commitments_inclusion_proof + ); + assert!(col_sidecar.verify_inclusion_proof()); + } + } + + #[track_caller] + fn test_reconstruct_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blob_sidecars) = create_test_block_and_blobs::(num_of_blobs, spec); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_sidecars, 
&signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let reconstructed_columns = reconstruct_data_columns( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + spec, + ) + .unwrap(); + + for i in 0..spec.number_of_columns { + assert_eq!(reconstructed_columns.get(i), column_sidecars.get(i), "{i}"); + } + } + + fn get_kzg() -> Kzg { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).expect("should create kzg") + } + + fn create_test_block_and_blobs( + num_of_blobs: usize, + spec: &ChainSpec, + ) -> (SignedBeaconBlock, BlobsList) { + let mut block = BeaconBlock::Deneb(BeaconBlockDeneb::empty(spec)); + let mut body = block.body_mut(); + let blob_kzg_commitments = body.blob_kzg_commitments_mut().unwrap(); + *blob_kzg_commitments = + KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) + .unwrap(); + + let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let blobs = (0..num_of_blobs) + .map(|_| Blob::::default()) + .collect::>() + .into(); + + (signed_block, blobs) + } +} diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index b8969b31f1e..0309c4995e1 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1645,6 +1645,13 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> "Time taken to compute blob sidecar inclusion proof", ) }); +pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { + try_create_histogram_with_buckets( + "data_column_sidecar_computation_seconds", + "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", + Ok(vec![0.04, 0.05, 0.1, 0.2, 0.3, 0.5, 0.7, 1.0]), + ) +}); pub static DATA_COLUMN_SIDECAR_PROCESSING_REQUESTS: 
LazyLock> = LazyLock::new(|| { try_create_int_counter( @@ -1785,6 +1792,26 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: "Runtime of batched kzg verification", ) }); +pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "kzg_verification_data_column_single_seconds", + "Runtime of single data column kzg verification", + Ok(vec![ + 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, + ]), + ) + }); +pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = + LazyLock::new(|| { + try_create_histogram_with_buckets( + "kzg_verification_data_column_batch_seconds", + "Runtime of batched data column kzg verification", + Ok(vec![ + 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, + ]), + ) + }); pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( || { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 6695f3c4bc1..d299eebec8e 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -10,7 +10,6 @@ use beacon_chain::graffiti_calculator::start_engine_version_cache_refresh_servic use beacon_chain::otb_verification_service::start_otb_verification_service; use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; -use beacon_chain::LightClientProducerEvent; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, eth1_chain::{CachingEth1Backend, Eth1Chain}, @@ -19,6 +18,7 @@ use beacon_chain::{ store::{HotColdDB, ItemStore, LevelDB, StoreConfig}, BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, }; +use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; @@ -505,7 
+505,7 @@ where deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( config.eth1, context.log().clone(), - spec, + spec.clone(), &snapshot, ) { Ok(service) => { @@ -624,12 +624,15 @@ where }; let beacon_chain_builder = if let Some(trusted_setup) = config.trusted_setup { - let kzg = trusted_setup - .try_into() - .map(Arc::new) - .map(Some) - .map_err(|e| format!("Failed to load trusted setup: {:?}", e))?; - beacon_chain_builder.kzg(kzg) + let kzg_err_msg = |e| format!("Failed to load trusted setup: {:?}", e); + + let kzg = if spec.is_peer_das_scheduled() { + Kzg::new_from_trusted_setup_das_enabled(trusted_setup).map_err(kzg_err_msg)? + } else { + Kzg::new_from_trusted_setup(trusted_setup).map_err(kzg_err_msg)? + }; + + beacon_chain_builder.kzg(Some(Arc::new(kzg))) } else { beacon_chain_builder }; diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index a0e3ca6cce3..90c05aea1f7 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,17 +1,12 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; -use crate::{ - BeaconBlockHeader, ChainSpec, EthSpec, Hash256, KzgProofs, SignedBeaconBlock, - SignedBeaconBlockHeader, Slot, -}; -use crate::{BeaconStateError, BlobsList}; +use crate::BeaconStateError; +use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; -use kzg::Kzg; -use kzg::{Blob as KzgBlob, Cell as KzgCell, Error as KzgError}; +use kzg::Error as KzgError; use kzg::{KzgCommitment, KzgProof}; use merkle_proof::verify_merkle_proof; -use rayon::prelude::*; use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; @@ -60,7 +55,7 @@ pub struct DataColumnSidecar { pub index: ColumnIndex, #[serde(with = "ssz_types::serde_utils::list_of_hex_fixed_vec")] pub column: DataColumn, - 
/// All of the KZG commitments and proofs associated with the block, used for verifying sample cells. + /// All the KZG commitments and proofs associated with the block, used for verifying sample cells. pub kzg_commitments: KzgCommitments, pub kzg_proofs: KzgProofs, pub signed_block_header: SignedBeaconBlockHeader, @@ -98,197 +93,6 @@ impl DataColumnSidecar { ) } - pub fn build_sidecars( - blobs: &BlobsList, - block: &SignedBeaconBlock, - kzg: &Kzg, - spec: &ChainSpec, - ) -> Result, DataColumnSidecarError> { - let number_of_columns = spec.number_of_columns; - if blobs.is_empty() { - return Ok(vec![]); - } - let kzg_commitments = block - .message() - .body() - .blob_kzg_commitments() - .map_err(|_err| DataColumnSidecarError::PreDeneb)?; - let kzg_commitments_inclusion_proof = - block.message().body().kzg_commitments_merkle_proof()?; - let signed_block_header = block.signed_block_header(); - - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - - // NOTE: assumes blob sidecars are ordered by index - let blob_cells_and_proofs_vec = blobs - .into_par_iter() - .map(|blob| { - let blob = KzgBlob::from_bytes(blob).map_err(KzgError::from)?; - kzg.compute_cells_and_proofs(&blob) - }) - .collect::, KzgError>>()?; - - for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { - // we iterate over each column, and we construct the column from "top to bottom", - // pushing on the cell and the corresponding proof at each column index. we do this for - // each blob (i.e. the outer loop). 
- for col in 0..number_of_columns { - let cell = - blob_cells - .get(col) - .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing blob cell at index {col}" - )))?; - let cell: Vec = cell.into_inner().into_iter().collect(); - let cell = Cell::::from(cell); - - let proof = blob_cell_proofs.get(col).ok_or( - DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing blob cell KZG proof at index {col}" - )), - )?; - - let column = - columns - .get_mut(col) - .ok_or(DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing data column at index {col}" - )))?; - let column_proofs = column_kzg_proofs.get_mut(col).ok_or( - DataColumnSidecarError::InconsistentArrayLength(format!( - "Missing data column proofs at index {col}" - )), - )?; - - column.push(cell); - column_proofs.push(*proof); - } - } - - let sidecars: Vec>> = columns - .into_iter() - .zip(column_kzg_proofs) - .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) - .collect(); - - Ok(sidecars) - } - - pub fn reconstruct( - kzg: &Kzg, - data_columns: &[Arc], - spec: &ChainSpec, - ) -> Result>, KzgError> { - let number_of_columns = spec.number_of_columns; - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - - let first_data_column = data_columns - .first() - .ok_or(KzgError::InconsistentArrayLength( - "data_columns should have at least one element".to_string(), - ))?; - let num_of_blobs = first_data_column.kzg_commitments.len(); - - let blob_cells_and_proofs_vec = (0..num_of_blobs) - .into_par_iter() - .map(|row_index| { - let mut 
cells: Vec = vec![]; - let mut cell_ids: Vec = vec![]; - for data_column in data_columns { - let cell = data_column.column.get(row_index).ok_or( - KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" - )), - )?; - - cells.push(ssz_cell_to_crypto_cell::(cell)?); - cell_ids.push(data_column.index); - } - // recover_all_cells does not expect sorted - let all_cells = kzg.recover_all_cells(&cell_ids, &cells)?; - let blob = kzg.cells_to_blob(&all_cells)?; - - // Note: This function computes all cells and proofs. According to Justin this is okay, - // computing a partial set may be more expensive and requires code paths that don't exist. - // Computing the blobs cells is technically unnecessary but very cheap. It's done here again - // for simplicity. - kzg.compute_cells_and_proofs(&blob) - }) - .collect::, KzgError>>()?; - - for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { - // we iterate over each column, and we construct the column from "top to bottom", - // pushing on the cell and the corresponding proof at each column index. we do this for - // each blob (i.e. the outer loop). 
- for col in 0..number_of_columns { - let cell = blob_cells - .get(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing blob cell at index {col}" - )))?; - let cell: Vec = cell.into_inner().into_iter().collect(); - let cell = Cell::::from(cell); - - let proof = blob_cell_proofs - .get(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing blob cell KZG proof at index {col}" - )))?; - - let column = columns - .get_mut(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing data column at index {col}" - )))?; - let column_proofs = - column_kzg_proofs - .get_mut(col) - .ok_or(KzgError::InconsistentArrayLength(format!( - "Missing data column proofs at index {col}" - )))?; - - column.push(cell); - column_proofs.push(*proof); - } - } - - // Clone sidecar elements from existing data column, no need to re-compute - let kzg_commitments = &first_data_column.kzg_commitments; - let signed_block_header = &first_data_column.signed_block_header; - let kzg_commitments_inclusion_proof = &first_data_column.kzg_commitments_inclusion_proof; - - let sidecars: Vec>> = columns - .into_iter() - .zip(column_kzg_proofs) - .enumerate() - .map(|(index, (col, proofs))| { - Arc::new(DataColumnSidecar { - index: index as u64, - column: DataColumn::::from(col), - kzg_commitments: kzg_commitments.clone(), - kzg_proofs: KzgProofs::::from(proofs), - signed_block_header: signed_block_header.clone(), - kzg_commitments_inclusion_proof: kzg_commitments_inclusion_proof.clone(), - }) - }) - .collect(); - Ok(sidecars) - } - pub fn min_size() -> usize { // min size is one cell Self { @@ -360,7 +164,7 @@ pub enum DataColumnSidecarError { MissingBlobSidecars, PreDeneb, SszError(SszError), - InconsistentArrayLength(String), + BuildSidecarFailed(String), } impl From for DataColumnSidecarError { @@ -386,9 +190,3 @@ impl From for DataColumnSidecarError { Self::SszError(e) } } - -/// Converts a cell ssz List object to an array to be used with the kzg -/// crypto library. 
-fn ssz_cell_to_crypto_cell(cell: &Cell) -> Result { - KzgCell::from_bytes(cell.as_ref()).map_err(Into::into) -} diff --git a/crypto/kzg/Cargo.toml b/crypto/kzg/Cargo.toml index d26dfe4992a..e940fe2e20c 100644 --- a/crypto/kzg/Cargo.toml +++ b/crypto/kzg/Cargo.toml @@ -17,3 +17,13 @@ ethereum_serde_utils = { workspace = true } hex = { workspace = true } ethereum_hashing = { workspace = true } c-kzg = { workspace = true } +rust_eth_kzg = { workspace = true } + +[dev-dependencies] +criterion = { workspace = true } +serde_json = { workspace = true } +eth2_network_config = { workspace = true } + +[[bench]] +name = "benchmark" +harness = false diff --git a/crypto/kzg/benches/benchmark.rs b/crypto/kzg/benches/benchmark.rs new file mode 100644 index 00000000000..69ec94c0b1b --- /dev/null +++ b/crypto/kzg/benches/benchmark.rs @@ -0,0 +1,31 @@ +use c_kzg::KzgSettings; +use criterion::{criterion_group, criterion_main, Criterion}; +use eth2_network_config::TRUSTED_SETUP_BYTES; +use kzg::TrustedSetup; +use rust_eth_kzg::{DASContext, TrustedSetup as PeerDASTrustedSetup}; + +pub fn bench_init_context(c: &mut Criterion) { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + + c.bench_function(&format!("Initialize context rust_eth_kzg"), |b| { + b.iter(|| { + const NUM_THREADS: usize = 1; + let trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + DASContext::with_threads(&trusted_setup, NUM_THREADS) + }) + }); + c.bench_function(&format!("Initialize context c-kzg (4844)"), |b| { + b.iter(|| { + let trusted_setup: TrustedSetup = serde_json::from_reader(TRUSTED_SETUP_BYTES) + .map_err(|e| format!("Unable to read trusted setup file: {}", e)) + .expect("should have trusted setup"); + KzgSettings::load_trusted_setup(&trusted_setup.g1_points(), &trusted_setup.g2_points()) + .unwrap() + }) + }); +} + +criterion_group!(benches, 
bench_init_context); +criterion_main!(benches); diff --git a/crypto/kzg/src/lib.rs b/crypto/kzg/src/lib.rs index 181642df390..507db05cd56 100644 --- a/crypto/kzg/src/lib.rs +++ b/crypto/kzg/src/lib.rs @@ -2,6 +2,7 @@ mod kzg_commitment; mod kzg_proof; mod trusted_setup; +use rust_eth_kzg::{CellIndex, DASContext}; use std::fmt::Debug; pub use crate::{ @@ -9,18 +10,35 @@ pub use crate::{ kzg_proof::KzgProof, trusted_setup::TrustedSetup, }; + pub use c_kzg::{ Blob, Bytes32, Bytes48, KzgSettings, BYTES_PER_BLOB, BYTES_PER_COMMITMENT, BYTES_PER_FIELD_ELEMENT, BYTES_PER_PROOF, FIELD_ELEMENTS_PER_BLOB, }; + +pub use rust_eth_kzg::{ + constants::{BYTES_PER_CELL, CELLS_PER_EXT_BLOB}, + Cell, CellIndex as CellID, CellRef, TrustedSetup as PeerDASTrustedSetup, +}; + +pub type CellsAndKzgProofs = ([Cell; CELLS_PER_EXT_BLOB], [KzgProof; CELLS_PER_EXT_BLOB]); + +pub type KzgBlobRef<'a> = &'a [u8; BYTES_PER_BLOB]; + #[derive(Debug)] pub enum Error { /// An error from the underlying kzg library. Kzg(c_kzg::Error), + /// A prover/verifier error from the rust-eth-kzg library. + PeerDASKZG(rust_eth_kzg::Error), /// The kzg verification failed KzgVerificationFailed, /// Misc indexing error InconsistentArrayLength(String), + /// Error reconstructing data columns. + ReconstructFailed(String), + /// Kzg was not initialized with PeerDAS enabled. + DASContextUninitialized, } impl From for Error { @@ -29,32 +47,11 @@ impl From for Error { } } -pub const CELLS_PER_EXT_BLOB: usize = 128; - -// TODO(das): use proper crypto once ckzg merges das branch -#[allow(dead_code)] -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] -pub struct Cell { - bytes: [u8; 2048usize], -} - -impl Cell { - pub fn from_bytes(b: &[u8]) -> Result { - Ok(Self { - bytes: b - .try_into() - .map_err(|_| Error::Kzg(c_kzg::Error::MismatchLength("".to_owned())))?, - }) - } - pub fn into_inner(self) -> [u8; 2048usize] { - self.bytes - } -} - /// A wrapper over a kzg library that holds the trusted setup parameters. 
#[derive(Debug)] pub struct Kzg { trusted_setup: KzgSettings, + context: Option, } impl Kzg { @@ -65,9 +62,36 @@ impl Kzg { &trusted_setup.g1_points(), &trusted_setup.g2_points(), )?, + context: None, }) } + pub fn new_from_trusted_setup_das_enabled(trusted_setup: TrustedSetup) -> Result { + // Initialize the trusted setup using default parameters + // + // Note: One can also use `from_json` to initialize it from the consensus-specs + // json string. + let peerdas_trusted_setup = PeerDASTrustedSetup::from(&trusted_setup); + // Set the number of threads to be used + // + // we set it to 1 to match the c-kzg performance + const NUM_THREADS: usize = 1; + + let context = DASContext::with_threads(&peerdas_trusted_setup, NUM_THREADS); + + Ok(Self { + trusted_setup: KzgSettings::load_trusted_setup( + &trusted_setup.g1_points(), + &trusted_setup.g2_points(), + )?, + context: Some(context), + }) + } + + fn context(&self) -> Result<&DASContext, Error> { + self.context.as_ref().ok_or(Error::DASContextUninitialized) + } + /// Compute the kzg proof given a blob and its kzg commitment. pub fn compute_blob_kzg_proof( &self, @@ -167,21 +191,18 @@ impl Kzg { } /// Computes the cells and associated proofs for a given `blob` at index `index`. - #[allow(clippy::type_complexity)] pub fn compute_cells_and_proofs( &self, - _blob: &Blob, - ) -> Result< - ( - Box<[Cell; CELLS_PER_EXT_BLOB]>, - Box<[KzgProof; CELLS_PER_EXT_BLOB]>, - ), - Error, - > { - // TODO(das): use proper crypto once ckzg merges das branch - let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); - let proofs = Box::new([KzgProof([0u8; BYTES_PER_PROOF]); CELLS_PER_EXT_BLOB]); - Ok((cells, proofs)) + blob: KzgBlobRef<'_>, + ) -> Result { + let (cells, proofs) = self + .context()? 
+ .compute_cells_and_kzg_proofs(blob) + .map_err(Error::PeerDASKZG)?; + + // Convert the proof type to a c-kzg proof type + let c_kzg_proof = proofs.map(KzgProof); + Ok((cells, c_kzg_proof)) } /// Verifies a batch of cell-proof-commitment triplets. @@ -191,35 +212,43 @@ impl Kzg { /// to the data column index. pub fn verify_cell_proof_batch( &self, - _cells: &[Cell], - _kzg_proofs: &[Bytes48], - _coordinates: &[(u64, u64)], - _kzg_commitments: &[Bytes48], + cells: &[CellRef<'_>], + kzg_proofs: &[Bytes48], + columns: Vec, + kzg_commitments: &[Bytes48], ) -> Result<(), Error> { - // TODO(das): use proper crypto once ckzg merges das branch - Ok(()) - } - - pub fn cells_to_blob(&self, _cells: &[Cell; CELLS_PER_EXT_BLOB]) -> Result { - // TODO(das): use proper crypto once ckzg merges das branch - Ok(Blob::new([0u8; 131072usize])) + let proofs: Vec<_> = kzg_proofs.iter().map(|proof| proof.as_ref()).collect(); + let commitments: Vec<_> = kzg_commitments + .iter() + .map(|commitment| commitment.as_ref()) + .collect(); + let verification_result = self.context()?.verify_cell_kzg_proof_batch( + commitments.to_vec(), + columns, + cells.to_vec(), + proofs.to_vec(), + ); + + // Modify the result so it matches roughly what the previous method was doing. + match verification_result { + Ok(_) => Ok(()), + Err(e) if e.invalid_proof() => Err(Error::KzgVerificationFailed), + Err(e) => Err(Error::PeerDASKZG(e)), + } } - pub fn recover_all_cells( + pub fn recover_cells_and_compute_kzg_proofs( &self, - _cell_ids: &[u64], - _cells: &[Cell], - ) -> Result, Error> { - // TODO(das): use proper crypto once ckzg merges das branch - let cells = Box::new(core::array::from_fn(|_| Cell { bytes: [0u8; 2048] })); - Ok(cells) - } -} - -impl TryFrom for Kzg { - type Error = Error; - - fn try_from(trusted_setup: TrustedSetup) -> Result { - Kzg::new_from_trusted_setup(trusted_setup) + cell_ids: &[u64], + cells: &[CellRef<'_>], + ) -> Result { + let (cells, proofs) = self + .context()? 
+ .recover_cells_and_proofs(cell_ids.to_vec(), cells.to_vec()) + .map_err(Error::PeerDASKZG)?; + + // Convert the proof type to a c-kzg proof type + let c_kzg_proof = proofs.map(KzgProof); + Ok((cells, c_kzg_proof)) } } diff --git a/crypto/kzg/src/trusted_setup.rs b/crypto/kzg/src/trusted_setup.rs index d930eabe224..6ddc33df5ab 100644 --- a/crypto/kzg/src/trusted_setup.rs +++ b/crypto/kzg/src/trusted_setup.rs @@ -1,3 +1,4 @@ +use crate::PeerDASTrustedSetup; use c_kzg::{BYTES_PER_G1_POINT, BYTES_PER_G2_POINT}; use serde::{ de::{self, Deserializer, Visitor}, @@ -43,6 +44,28 @@ impl TrustedSetup { } } +impl From<&TrustedSetup> for PeerDASTrustedSetup { + fn from(trusted_setup: &TrustedSetup) -> Self { + Self { + g1_monomial: trusted_setup + .g1_monomial_points + .iter() + .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) + .collect::>(), + g1_lagrange: trusted_setup + .g1_points + .iter() + .map(|g1_point| format!("0x{}", hex::encode(g1_point.0))) + .collect::>(), + g2_monomial: trusted_setup + .g2_points + .iter() + .map(|g2_point| format!("0x{}", hex::encode(g2_point.0))) + .collect::>(), + } + } +} + impl Serialize for G1Point { fn serialize(&self, serializer: S) -> Result where From 22ccdb6c23965bdb231ec93fa7c709ae555ff42e Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Mon, 12 Aug 2024 17:16:21 -0700 Subject: [PATCH 18/18] Reuse password option prompts again on a wrong password (#4380) * Prompt for password if incorrect in import * lint and fmt * Use if instead of match * Fix issue raised by @chong-he * Merge branch 'unstable' into reuse-pw-break * Remove unused function --- account_manager/src/validator/import.rs | 50 ++++++++++++++++++------- 1 file changed, 36 insertions(+), 14 deletions(-) diff --git a/account_manager/src/validator/import.rs b/account_manager/src/validator/import.rs index a7c72679f74..8f04e9059a3 100644 --- a/account_manager/src/validator/import.rs +++ b/account_manager/src/validator/import.rs @@ -178,7 +178,13 @@ pub fn 
cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin let password_opt = loop { if let Some(password) = previous_password.clone() { eprintln!("Reuse previous password."); - break Some(password); + if check_password_on_keystore(&keystore, &password)? { + break Some(password); + } else { + eprintln!("Reused password incorrect. Retry!"); + previous_password = None; + continue; + } } eprintln!(); eprintln!("{}", PASSWORD_PROMPT); @@ -201,20 +207,12 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin } }; - match keystore.decrypt_keypair(password.as_ref()) { - Ok(_) => { - eprintln!("Password is correct."); - eprintln!(); - sleep(Duration::from_secs(1)); // Provides nicer UX. - if reuse_password { - previous_password = Some(password.clone()); - } - break Some(password); - } - Err(eth2_keystore::Error::InvalidPassword) => { - eprintln!("Invalid password"); + // Check if the password unlocks the keystore + if check_password_on_keystore(&keystore, &password)? { + if reuse_password { + previous_password = Some(password.clone()); } - Err(e) => return Err(format!("Error whilst decrypting keypair: {:?}", e)), + break Some(password); } }; @@ -317,3 +315,27 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin Ok(()) } + +/// Checks if the given password unlocks the keystore. +/// +/// Returns `Ok(true)` if password unlocks the keystore successfully. +/// Returns `Ok(false)` if password is incorrect. +/// Otherwise, returns the keystore error. +fn check_password_on_keystore( + keystore: &Keystore, + password: &ZeroizeString, +) -> Result { + match keystore.decrypt_keypair(password.as_ref()) { + Ok(_) => { + eprintln!("Password is correct."); + eprintln!(); + sleep(Duration::from_secs(1)); // Provides nicer UX. + Ok(true) + } + Err(eth2_keystore::Error::InvalidPassword) => { + eprintln!("Invalid password"); + Ok(false) + } + Err(e) => Err(format!("Error whilst decrypting keypair: {:?}", e)), + } +}