diff --git a/backend/cmd/exporter/main.go b/backend/cmd/exporter/main.go index 8be9c7fc4..426ca33e0 100644 --- a/backend/cmd/exporter/main.go +++ b/backend/cmd/exporter/main.go @@ -19,7 +19,6 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/version" "github.com/gobitfly/beaconchain/pkg/exporter/modules" "github.com/gobitfly/beaconchain/pkg/exporter/services" - "github.com/gobitfly/beaconchain/pkg/monitoring" ) func Run() { @@ -138,8 +137,8 @@ func Run() { wg.Wait() // enable light-weight db connection monitoring - monitoring.Init(false) - monitoring.Start() + // monitoring.Init(false) + // monitoring.Start() if utils.Config.TieredCacheProvider != "redis" { log.Fatal(fmt.Errorf("no cache provider set, please set TierdCacheProvider (example redis)"), "", 0) diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index 43b499bcb..12da592b2 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -138,12 +138,13 @@ func Run() { requires, ok := REQUIRES_LIST[opts.Command] if !ok { requires = misctypes.Requires{ - Bigtable: true, - Redis: true, - ClNode: true, - ElNode: true, - UserDBs: true, - NetworkDBs: true, + Bigtable: true, + Redis: true, + ClNode: true, + ElNode: true, + UserDBs: true, + NetworkDBs: true, + ClickhouseDBs: false, } } @@ -195,9 +196,11 @@ func Run() { } // clickhouse - db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&cfg.ClickHouse.WriterDatabase, &cfg.ClickHouse.ReaderDatabase, "clickhouse", "clickhouse") - defer db.ClickHouseReader.Close() - defer db.ClickHouseWriter.Close() + if requires.ClickhouseDBs { + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&cfg.ClickHouse.WriterDatabase, &cfg.ClickHouse.ReaderDatabase, "clickhouse", "clickhouse") + defer db.ClickHouseReader.Close() + defer db.ClickHouseWriter.Close() + } // Initialize the persistent redis client if requires.Redis { @@ -1142,7 +1145,7 @@ func debugBlocks(clClient *rpc.LighthouseClient) error { } else if clBlock.ExecutionPayload.BlockNumber != i { log.Warnf("clBlock.ExecutionPayload.BlockNumber != i: %v != %v", clBlock.ExecutionPayload.BlockNumber, i) } else { - logFields["cl.txs"] = len(clBlock.ExecutionPayload.Transactions) + logFields["cl.txs"] = clBlock.ExecutionPayload.TransactionsCount } log.InfoWithFields(logFields, "debug block") diff --git a/backend/cmd/misc/misctypes/requires.go b/backend/cmd/misc/misctypes/requires.go index 6d2337412..178db5bd7 100644 --- a/backend/cmd/misc/misctypes/requires.go +++ b/backend/cmd/misc/misctypes/requires.go @@ -1,10 +1,11 @@ package misctypes type Requires struct { - Bigtable bool - Redis bool - ClNode bool - ElNode bool - NetworkDBs bool - UserDBs bool + Bigtable bool + Redis bool + ClNode bool + ElNode bool + NetworkDBs bool + UserDBs bool + ClickhouseDBs bool } diff --git a/backend/pkg/commons/config/config.go b/backend/pkg/commons/config/config.go index bd135f56d..fac99387c 100644 --- a/backend/pkg/commons/config/config.go +++ b/backend/pkg/commons/config/config.go @@ -31,3 +31,9 @@ var GnosisChainYml string //go:embed holesky.chain.yml var HoleskyChainYml string + +//go:embed mekong.chain.yml +var MekongChainYml string + +//go:embed pectra-devnet-5.chain.yml +var PectraDevnet5ChainYml string diff --git a/backend/pkg/commons/config/mekong.chain.yml b/backend/pkg/commons/config/mekong.chain.yml new file mode 100644 index 000000000..eed63477c --- /dev/null +++ b/backend/pkg/commons/config/mekong.chain.yml @@ -0,0 +1,153 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: mekong # needs to exist 
because of Prysm. Otherwise it conflicts with mainnet genesis + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 100000 +# 2024-Nov-05 03:59:00 PM UTC +MIN_GENESIS_TIME: 1730822340 +GENESIS_FORK_VERSION: 0x10637624 +GENESIS_DELAY: 60 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20637624 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30637624 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40637624 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50637624 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60637624 +ELECTRA_FORK_EPOCH: 256 + +# Fulu +FULU_FORK_VERSION: 0x70000000 +FULU_FORK_EPOCH: 99999 + +# EIP7594 - Peerdas +EIP7594_FORK_VERSION: 0x70000000 +EIP7594_FORK_EPOCH: 99999 + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 2 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 30000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 128 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 7078815900 +DEPOSIT_NETWORK_ID: 7078815900 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 
epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# EIP7594 +NUMBER_OF_COLUMNS: 128 +MAX_CELLS_IN_EXTENDED_MATRIX: 768 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 + +# [New in Electra:EIP7251] +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 # 2**7 * 10**9 (= 128,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 # 2**8 * 10**9 (= 256,000,000,000) \ No newline at end of file diff --git a/backend/pkg/commons/config/pectra-devnet-5.chain.yml b/backend/pkg/commons/config/pectra-devnet-5.chain.yml new file mode 100644 index 000000000..d5b6b1ca2 --- /dev/null +++ b/backend/pkg/commons/config/pectra-devnet-5.chain.yml @@ -0,0 +1,164 @@ +# Extends the mainnet preset +PRESET_BASE: mainnet +CONFIG_NAME: testnet # needs to exist because of Prysm. Otherwise it conflicts with mainnet genesis + +# Genesis +# --------------------------------------------------------------- +# `2**14` (= 16,384) +MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 11300 +# 2025-Jan-16 01:30:00 PM UTC +MIN_GENESIS_TIME: 1737034200 +GENESIS_FORK_VERSION: 0x10710240 +GENESIS_DELAY: 60 + + +# Forking +# --------------------------------------------------------------- +# Some forks are disabled for now: +# - These may be re-assigned to another fork-version later +# - Temporarily set to max uint64 value: 2**64 - 1 + +# Altair +ALTAIR_FORK_VERSION: 0x20710240 +ALTAIR_FORK_EPOCH: 0 +# Merge +BELLATRIX_FORK_VERSION: 0x30710240 +BELLATRIX_FORK_EPOCH: 0 +TERMINAL_TOTAL_DIFFICULTY: 0 +TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 +TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 + +# Capella +CAPELLA_FORK_VERSION: 0x40710240 +CAPELLA_FORK_EPOCH: 0 + +# DENEB +DENEB_FORK_VERSION: 0x50710240 +DENEB_FORK_EPOCH: 0 + +# Electra +ELECTRA_FORK_VERSION: 0x60710240 +ELECTRA_FORK_EPOCH: 4 + +# Fulu +FULU_FORK_VERSION: 0x70710240 +FULU_FORK_EPOCH: 999999 + + +# Time parameters +# --------------------------------------------------------------- +# 12 seconds +SECONDS_PER_SLOT: 12 +# 14 (estimate from Eth1 mainnet) +SECONDS_PER_ETH1_BLOCK: 12 +# 2**8 (= 256) epochs ~27 hours +MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 2 +# 2**8 (= 256) epochs ~27 hours +SHARD_COMMITTEE_PERIOD: 256 +# 2**11 (= 2,048) Eth1 blocks ~8 hours +ETH1_FOLLOW_DISTANCE: 2048 + +# Validator cycle +# --------------------------------------------------------------- +# 2**2 (= 4) +INACTIVITY_SCORE_BIAS: 4 +# 2**4 (= 16) +INACTIVITY_SCORE_RECOVERY_RATE: 16 +# 2**4 * 10**9 (= 16,000,000,000) Gwei +EJECTION_BALANCE: 16000000000 +# 2**2 (= 4) +MIN_PER_EPOCH_CHURN_LIMIT: 4 +# 2**16 (= 65,536) +CHURN_LIMIT_QUOTIENT: 128 +# [New in Deneb:EIP7514] 2**3 (= 8) +MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + +# Fork choice +# --------------------------------------------------------------- +# 40% +PROPOSER_SCORE_BOOST: 40 +# 20% +REORG_HEAD_WEIGHT_THRESHOLD: 20 +# 160% +REORG_PARENT_WEIGHT_THRESHOLD: 160 +# `2` epochs +REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + +# Deposit contract +# --------------------------------------------------------------- +DEPOSIT_CHAIN_ID: 7088110746 +DEPOSIT_NETWORK_ID: 7088110746 +DEPOSIT_CONTRACT_ADDRESS: 0x4242424242424242424242424242424242424242 + +# Networking +# --------------------------------------------------------------- +# `10 * 
2**20` (= 10485760, 10 MiB) +GOSSIP_MAX_SIZE: 10485760 +# `2**10` (= 1024) +MAX_REQUEST_BLOCKS: 1024 +# `2**8` (= 256) +EPOCHS_PER_SUBNET_SUBSCRIPTION: 256 +# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) +MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024 +# `10 * 2**20` (=10485760, 10 MiB) +MAX_CHUNK_SIZE: 10485760 +# 5s +TTFB_TIMEOUT: 5 +# 10s +RESP_TIMEOUT: 10 +ATTESTATION_PROPAGATION_SLOT_RANGE: 32 +# 500ms +MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500 +MESSAGE_DOMAIN_INVALID_SNAPPY: 0x00000000 +MESSAGE_DOMAIN_VALID_SNAPPY: 0x01000000 +# 2 subnets per node +SUBNETS_PER_NODE: 2 +# 2**8 (= 64) +ATTESTATION_SUBNET_COUNT: 64 +ATTESTATION_SUBNET_EXTRA_BITS: 0 +# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS +ATTESTATION_SUBNET_PREFIX_BITS: 6 + +# Deneb +# `2**7` (=128) +MAX_REQUEST_BLOCKS_DENEB: 128 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK +MAX_REQUEST_BLOB_SIDECARS: 768 +# `2**12` (= 4096 epochs, ~18 days) +MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 +# `6` +BLOB_SIDECAR_SUBNET_COUNT: 6 +## `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 + +# Electra +# 2**7 * 10**9 (= 128,000,000,000) +MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 +# 2**8 * 10**9 (= 256,000,000,000) +MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT: 256000000000 +# `9` +BLOB_SIDECAR_SUBNET_COUNT_ELECTRA: 9 +# `uint64(6)` +TARGET_BLOBS_PER_BLOCK_ELECTRA: 6 +# `uint64(9)` +MAX_BLOBS_PER_BLOCK_ELECTRA: 9 +# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_ELECTRA +MAX_REQUEST_BLOB_SIDECARS_ELECTRA: 1152 + +# Whisk +# `Epoch(2**8)` +WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256 +# `Epoch(2)` +WHISK_PROPOSER_SELECTION_GAP: 2 + +# Fulu +NUMBER_OF_COLUMNS: 128 +MAX_CELLS_IN_EXTENDED_MATRIX: 768 +DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 +MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384 +SAMPLES_PER_SLOT: 8 +CUSTODY_REQUIREMENT: 4 +TARGET_BLOBS_PER_BLOCK_FULU: 9 +MAX_BLOBS_PER_BLOCK_FULU: 12 +# `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_FULU` +MAX_REQUEST_BLOB_SIDECARS_FULU: 1536 diff --git a/backend/pkg/commons/db/bigtable.go b/backend/pkg/commons/db/bigtable.go index ff4de7833..b772c9d10 100644 --- a/backend/pkg/commons/db/bigtable.go +++ b/backend/pkg/commons/db/bigtable.go @@ -547,9 +547,11 @@ func (bigtable *Bigtable) SaveValidatorBalances(epoch uint64, validators []*type balanceEncoded := make([]byte, 8) binary.LittleEndian.PutUint64(balanceEncoded, validator.Balance) - effectiveBalanceEncoded := uint8(validator.EffectiveBalance / 1e9) // we can encode the effective balance in 1 byte as it is capped at 32ETH and only decrements in 1 ETH steps - combined := append(balanceEncoded, effectiveBalanceEncoded) + effectiveBalanceEncoded := make([]byte, 8) + binary.LittleEndian.PutUint64(effectiveBalanceEncoded, validator.EffectiveBalance) + + combined := append(balanceEncoded, effectiveBalanceEncoded...) 
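+	// The cell value is now 16 bytes: two little-endian uint64s, the raw balance
+	// followed by the raw effective balance in Gwei. The previous 9-byte layout
+	// (8-byte balance + 1-byte effective balance in whole ETH) cannot represent
+	// effective balances above 255 ETH, which EIP-7251 allows post-Electra, hence
+	// presumably the switch. A reader could decode the new layout roughly like this
+	// (hypothetical sketch, not part of this change):
+	//   balance := binary.LittleEndian.Uint64(combined[:8])
+	//   effectiveBalance := binary.LittleEndian.Uint64(combined[8:16])
+	// Rows written before this change may still carry the old 9-byte layout, so
+	// decoders may need to branch on the stored value length.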
mut := &gcp_bigtable.Mutation{} mut.Set(VALIDATOR_BALANCES_FAMILY, "b", ts, combined) key := fmt.Sprintf("%s:%s:%s:%s", bigtable.chainId, bigtable.validatorIndexToKey(validator.Index), VALIDATOR_BALANCES_FAMILY, epochKey) diff --git a/backend/pkg/commons/db/db.go b/backend/pkg/commons/db/db.go index 8a16a64dc..d6dd3a82d 100644 --- a/backend/pkg/commons/db/db.go +++ b/backend/pkg/commons/db/db.go @@ -2504,3 +2504,97 @@ func CopyToTable[T []any](tableName string, columns []string, data []T) error { } return nil } + +func HasEventsForEpoch(epoch uint64) (bool, error) { + if epoch == 0 { + return true, nil + } + + firstSlot := (epoch - 1) * utils.Config.Chain.ClConfig.SlotsPerEpoch + lastSlot := (epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch) - 1 + var count uint64 + err := ReaderDb.Get(&count, ` + SELECT + COUNT(*) + FROM + consensus_layer_events + WHERE + slot >= $1 AND slot <= $2`, firstSlot, lastSlot) + if err != nil { + return false, fmt.Errorf("error checking for events for epoch: %w", err) + } + + return count > 0, nil +} + +func TransformSwitchToCompoundingRequests(firstSlot, lastSlot uint64, tx *sqlx.Tx) (int64, error) { + res, err := tx.Exec(` + INSERT INTO blocks_switch_to_compounding_requests (block_slot, block_root, request_index, address, validator_index) + SELECT + slot AS slot, + block_root AS block_root, + event_index AS request_index, + decode((data->>'address'), 'base64') AS address, + (data->>'index')::int AS validator_index + FROM consensus_layer_events WHERE event_name = 'SwitchToCompoundingEvent' AND slot >= $1 AND slot <= $2 ON CONFLICT DO NOTHING; + `, firstSlot, lastSlot) + if err != nil { + return 0, fmt.Errorf("error transforming consolidation requests: %w", err) + } + + consolidationRequestsProcessed, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("error getting the amount of processed consolidation requests: %w", err) + } + + return consolidationRequestsProcessed, nil +} + +func TransformConsolidationRequests(firstSlot, lastSlot uint64, tx *sqlx.Tx) (int64, error) { + res, err := tx.Exec(` + INSERT INTO blocks_consolidation_requests (block_slot, block_root, request_index, source_index, target_index, amount_consolidated) + SELECT + slot AS slot, + block_root AS block_root, + event_index AS request_index, + (data->>'source_index')::int AS source_index, + (data->>'target_index')::int AS target_index, + (data->>'amount')::bigint AS amount_consolidated + FROM consensus_layer_events WHERE event_name = 'ConsolidationProcessedEvent' AND slot >= $1 AND slot <= $2 ON CONFLICT DO NOTHING; + `, firstSlot, lastSlot) + if err != nil { + return 0, fmt.Errorf("error transforming consolidation requests: %w", err) + } + + consolidationRequestsProcessed, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("error getting the amount of processed consolidation requests: %w", err) + } + + return consolidationRequestsProcessed, nil +} + +func TransformDepositRequests(firstSlot, lastSlot uint64, tx *sqlx.Tx) (int64, error) { + res, err := tx.Exec(` + INSERT INTO blocks_deposit_requests (block_slot, block_root, request_index, pubkey, withdrawal_credentials, amount, signature) + SELECT + slot AS block_slot, + block_root AS block_root, + event_index AS request_index, + decode((data->>'pubkey'), 'base64') AS pubkey, + decode((data->>'withdrawal_credentials'), 'base64')::bytea AS withdrawal_credentials, + (data->>'amount')::bigint AS amount, + decode((data->>'signature'), 'base64')::bytea AS signature + FROM consensus_layer_events WHERE event_name = 
'DepositProcessedEvent' AND slot >= $1 AND slot <= $2 ON CONFLICT DO NOTHING; +`, firstSlot, lastSlot) + if err != nil { + return 0, fmt.Errorf("error transforming deposit requests: %w", err) + } + + depositRequestsProcessed, err := res.RowsAffected() + if err != nil { + return 0, fmt.Errorf("error getting the amount of processed deposit requests: %w", err) + } + + return depositRequestsProcessed, nil +} diff --git a/backend/pkg/commons/db/migrations/postgres/20250131063102_pectra_tables.sql b/backend/pkg/commons/db/migrations/postgres/20250131063102_pectra_tables.sql new file mode 100644 index 000000000..bf547c166 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20250131063102_pectra_tables.sql @@ -0,0 +1,88 @@ +-- +goose Up +-- +goose StatementBegin +SELECT + 'creating blocks_consolidation_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_consolidation_requests ( + block_slot INT NOT NULL, + block_root BYTEA NOT NULL, + request_index INT NOT NULL, + source_address BYTEA NOT NULL, + source_index INT NOT NULL, + target_index INT NOT NULL, + PRIMARY KEY (block_slot, block_root, request_index) + ); + +SELECT + 'creating blocks_withdrawal_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_withdrawal_requests ( + block_slot INT NOT NULL, + block_root BYTEA NOT NULL, + request_index INT NOT NULL, + source_address BYTEA NOT NULL, + validator_pubkey BYTEA NOT NULL, + amount BIGINT NOT NULL, + PRIMARY KEY (block_slot, block_root, request_index) + ); + +SELECT + 'creating blocks_deposit_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_deposit_requests ( + block_slot INT NOT NULL, + block_root BYTEA NOT NULL, + request_index INT NOT NULL, + pubkey BYTEA NOT NULL, + withdrawal_credentials BYTEA NOT NULL, + amount BIGINT NOT NULL, + signature BYTEA NOT NULL, + PRIMARY KEY (block_slot, block_root, request_index) + ); + +SELECT + 'creating blocks_switch_to_compounding_requests table'; + +CREATE TABLE + IF NOT EXISTS blocks_switch_to_compounding_requests ( + block_slot int not null, + block_root bytea not null, + request_index int not null, + address bytea not null, + validator_index int not null, + primary key (block_slot, block_root, request_index) + ); + +SELECT + 'updating blocks_attestations table'; + +ALTER TABLE blocks_attestations +ADD COLUMN IF NOT EXISTS committeebits BYTEA; + +-- +goose StatementEnd +-- +goose Down +-- +goose StatementBegin +SELECT + 'dropping blocks_consolidation_requests table'; + +DROP TABLE IF EXISTS blocks_consolidation_requests; + +SELECT + 'dropping blocks_withdrawal_requests table'; + +DROP TABLE IF EXISTS blocks_withdrawal_requests; + +SELECT + 'dropping blocks_deposit_requests table'; + +DROP TABLE IF EXISTS blocks_deposit_requests; + +SELECT + 'dropping blocks_switch_to_compounding_requests table'; + +DROP TABLE IF EXISTS blocks_switch_to_compounding_requests; + +-- +goose StatementEnd \ No newline at end of file diff --git a/backend/pkg/commons/rpc/erigon.go b/backend/pkg/commons/rpc/erigon.go index eeacf95e6..f21ee5e78 100644 --- a/backend/pkg/commons/rpc/erigon.go +++ b/backend/pkg/commons/rpc/erigon.go @@ -713,7 +713,7 @@ func (client *ErigonClient) GetERC20TokenMetadata(token []byte) (*types.ERC20Met return ret, err } - if err == nil && len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { + if len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { // it's possible that a token contract implements the ERC20 interfaces but does not return any values; we use a backup in this case ret = 
&types.ERC20Metadata{ Decimals: []byte{0x0}, diff --git a/backend/pkg/commons/rpc/geth.go b/backend/pkg/commons/rpc/geth.go index 379f82d50..ccbd85340 100644 --- a/backend/pkg/commons/rpc/geth.go +++ b/backend/pkg/commons/rpc/geth.go @@ -443,7 +443,7 @@ func (client *GethClient) GetERC20TokenMetadata(token []byte) (*types.ERC20Metad return ret, err } - if err == nil && len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { + if len(ret.Decimals) == 0 && ret.Symbol == "" && len(ret.TotalSupply) == 0 { // it's possible that a token contract implements the ERC20 interfaces but does not return any values; we use a backup in this case ret = &types.ERC20Metadata{ Decimals: []byte{0x0}, diff --git a/backend/pkg/commons/rpc/interfaces.go b/backend/pkg/commons/rpc/interfaces.go index 8459f62ad..cf2e847e2 100644 --- a/backend/pkg/commons/rpc/interfaces.go +++ b/backend/pkg/commons/rpc/interfaces.go @@ -21,6 +21,7 @@ type Client interface { GetBalancesForEpoch(epoch int64) (map[uint64]uint64, error) GetValidatorState(epoch uint64) (*constypes.StandardValidatorsResponse, error) GetBlockHeader(slot uint64) (*constypes.StandardBeaconHeaderResponse, error) + GetStandardBeaconState(stateID any) (*constypes.StandardBeaconStateResponse, error) } type Eth1Client interface { diff --git a/backend/pkg/commons/rpc/lighthouse.go b/backend/pkg/commons/rpc/lighthouse.go index d87f8e0f8..f9c5a77cf 100644 --- a/backend/pkg/commons/rpc/lighthouse.go +++ b/backend/pkg/commons/rpc/lighthouse.go @@ -178,12 +178,21 @@ func (lc *LighthouseClient) GetEpochAssignments(epoch uint64) (*types.EpochAssig parsedCommittees, err := lc.cl.GetCommittees(depStateRoot, &epoch, nil, nil) if err != nil { - return nil, fmt.Errorf("error retrieving committees data: %w", err) + if strings.Contains(err.Error(), "epoch out of bounds, too far in future") { + log.Warnf("error retrieving committees data for epoch %v: %v, trying slot based retrieval", epoch, err) + parsedCommittees, err = lc.cl.GetCommittees(epoch*utils.Config.ClConfig.SlotsPerEpoch, &epoch, nil, nil) + if err != nil { + return nil, fmt.Errorf("error retrieving committees data: %w", err) + } + } else { + return nil, fmt.Errorf("error retrieving committees data: %w", err) + } } assignments := &types.EpochAssignments{ - ProposerAssignments: make(map[uint64]uint64), - AttestorAssignments: make(map[string]uint64), + ProposerAssignments: make(map[uint64]uint64), + AttestorAssignments: make(map[string]uint64), + AttestationCommitteeLengths: make(map[string]uint64), } // propose @@ -193,6 +202,7 @@ func (lc *LighthouseClient) GetEpochAssignments(epoch uint64) (*types.EpochAssig // attest for _, committee := range parsedCommittees.Data { + assignments.AttestationCommitteeLengths[fmt.Sprintf("%d-%d", committee.Slot, committee.Index)] = uint64(len(committee.Validators)) for i, valIndex := range committee.Validators { k := utils.FormatAttestorAssignmentKey(committee.Slot, committee.Index, uint64(i)) assignments.AttestorAssignments[k] = uint64(valIndex) @@ -616,6 +626,10 @@ func (lc *LighthouseClient) GetBlockBySlot(slot uint64) (*types.Block, error) { Deposits: make([]*types.Deposit, 0), VoluntaryExits: make([]*types.VoluntaryExit, 0), SyncAggregate: nil, + ExecutionRequests: &types.ExecutionRequests{ + Consolidations: make([]*types.ConsolidationExecutionRequest, 0), + Withdrawals: make([]*types.WithdrawalExecutionRequest, 0), + }, } if isFirstSlotOfEpoch { @@ -679,7 +693,7 @@ func (lc *LighthouseClient) GetBlockBySlot(slot uint64) (*types.Block, error) { } // for the 
first slot of an epoch, also retrieve the epoch assignments - if block.Slot%utils.Config.Chain.ClConfig.SlotsPerEpoch == 0 { + if isFirstSlotOfEpoch { var err error block.EpochAssignments, err = lc.GetEpochAssignments(block.Slot / utils.Config.Chain.ClConfig.SlotsPerEpoch) if err != nil { @@ -743,27 +757,69 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB SignedBLSToExecutionChange: make([]*types.SignedBLSToExecutionChange, len(parsedBlock.Message.Body.SignedBLSToExecutionChange)), BlobKZGCommitments: make([][]byte, len(parsedBlock.Message.Body.BlobKZGCommitments)), BlobKZGProofs: make([][]byte, len(parsedBlock.Message.Body.BlobKZGCommitments)), - AttestationDuties: make(map[types.ValidatorIndex][]types.Slot), - SyncDuties: make(map[types.ValidatorIndex]bool), + ExecutionRequests: &types.ExecutionRequests{ + Deposits: make([]*types.DepositExecutionRequest, len(parsedBlock.Message.Body.ExecutionRequests.Deposits)), + Consolidations: make([]*types.ConsolidationExecutionRequest, len(parsedBlock.Message.Body.ExecutionRequests.Consolidations)), + Withdrawals: make([]*types.WithdrawalExecutionRequest, len(parsedBlock.Message.Body.ExecutionRequests.Withdrawals)), + }, + AttestationDuties: make(map[types.ValidatorIndex][]types.Slot), + SyncDuties: make(map[types.ValidatorIndex]bool), } for i, c := range parsedBlock.Message.Body.BlobKZGCommitments { block.BlobKZGCommitments[i] = c } + if len(parsedBlock.Message.Body.ExecutionRequests.Consolidations) > 0 { + for i, consolidation := range parsedBlock.Message.Body.ExecutionRequests.Consolidations { + block.ExecutionRequests.Consolidations[i] = &types.ConsolidationExecutionRequest{ + SourceAddress: consolidation.SourceAddress, + SourcePubkey: consolidation.SourcePubkey, + TargetPubkey: consolidation.TargetPubkey, + } + } + } + + if len(parsedBlock.Message.Body.ExecutionRequests.Withdrawals) > 0 { + for i, withdrawal := range parsedBlock.Message.Body.ExecutionRequests.Withdrawals { + block.ExecutionRequests.Withdrawals[i] = &types.WithdrawalExecutionRequest{ + SourceAddress: withdrawal.SourceAddress, + ValidatorPubkey: withdrawal.ValidatorPubkey, + Amount: withdrawal.Amount, + } + } + } + + if len(parsedBlock.Message.Body.ExecutionRequests.Deposits) > 0 { + for i, deposit := range parsedBlock.Message.Body.ExecutionRequests.Deposits { + block.ExecutionRequests.Deposits[i] = &types.DepositExecutionRequest{ + Pubkey: deposit.Pubkey, + WithdrawalCredentials: deposit.WithdrawalCredentials, + Amount: deposit.Amount, + Signature: deposit.Signature, + Index: deposit.Index, + } + } + } + if len(parsedBlock.Message.Body.BlobKZGCommitments) > 0 { res, err := lc.GetBlobSidecars(fmt.Sprintf("%#x", block.BlockRoot)) if err != nil { - return nil, err - } - if len(res.Data) != len(parsedBlock.Message.Body.BlobKZGCommitments) { - return nil, fmt.Errorf("error constructing block at slot %v: len(blob_sidecars) != len(block.blob_kzg_commitments): %v != %v", block.Slot, len(res.Data), len(parsedBlock.Message.Body.BlobKZGCommitments)) - } - for i, d := range res.Data { - if !bytes.Equal(d.KzgCommitment, block.BlobKZGCommitments[i]) { - return nil, fmt.Errorf("error constructing block at slot %v: unequal kzg_commitments at index %v: %#x != %#x", block.Slot, i, d.KzgCommitment, block.BlobKZGCommitments[i]) + // TODO: remove this hack for mekong!!! 
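+					// If the blob sidecars cannot be fetched, fall back to storing an empty
+					// KZG proof for each commitment rather than failing the whole block import;
+					// the commitments themselves were already copied from the block body above.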
+ for i := 0; i < len(block.BlobKZGProofs); i++ { + block.BlobKZGProofs[i] = []byte{} + } + //return nil, err + } else { + if len(res.Data) != len(parsedBlock.Message.Body.BlobKZGCommitments) { + return nil, fmt.Errorf("error constructing block at slot %v: len(blob_sidecars) != len(block.blob_kzg_commitments): %v != %v", block.Slot, len(res.Data), len(parsedBlock.Message.Body.BlobKZGCommitments)) + } + for i, d := range res.Data { + if !bytes.Equal(d.KzgCommitment, block.BlobKZGCommitments[i]) { + return nil, fmt.Errorf("error constructing block at slot %v: unequal kzg_commitments at index %v: %#x != %#x", block.Slot, i, d.KzgCommitment, block.BlobKZGCommitments[i]) + } + block.BlobKZGProofs[i] = d.KzgProof } - block.BlobKZGProofs[i] = d.KzgProof } } @@ -798,43 +854,6 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB } if payload := parsedBlock.Message.Body.ExecutionPayload; payload != nil && !bytes.Equal(payload.ParentHash, make([]byte, 32)) { - txs := make([]*types.Transaction, 0, len(payload.Transactions)) - for i, rawTx := range payload.Transactions { - tx := &types.Transaction{Raw: rawTx} - var decTx gethtypes.Transaction - if err := decTx.UnmarshalBinary(rawTx); err != nil { - return nil, fmt.Errorf("error parsing tx %d block %x: %w", i, payload.BlockHash, err) - } else { - h := decTx.Hash() - tx.TxHash = h[:] - tx.AccountNonce = decTx.Nonce() - // big endian - tx.Price = decTx.GasPrice().Bytes() - tx.GasLimit = decTx.Gas() - sender, err := lc.signer.Sender(&decTx) - if err != nil { - return nil, fmt.Errorf("transaction with invalid sender (slot: %v, tx-hash: %x): %w", slot, h, err) - } - tx.Sender = sender.Bytes() - if v := decTx.To(); v != nil { - tx.Recipient = v.Bytes() - } else { - tx.Recipient = []byte{} - } - tx.Amount = decTx.Value().Bytes() - tx.Payload = decTx.Data() - tx.MaxPriorityFeePerGas = decTx.GasTipCap().Uint64() - tx.MaxFeePerGas = decTx.GasFeeCap().Uint64() - - if decTx.BlobGasFeeCap() != nil { - tx.MaxFeePerBlobGas = decTx.BlobGasFeeCap().Uint64() - } - for _, h := range decTx.BlobHashes() { - tx.BlobVersionedHashes = append(tx.BlobVersionedHashes, h.Bytes()) - } - } - txs = append(txs, tx) - } withdrawals := make([]*types.Withdrawals, 0, len(payload.Withdrawals)) for _, w := range payload.Withdrawals { withdrawals = append(withdrawals, &types.Withdrawals{ @@ -846,23 +865,23 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB } block.ExecutionPayload = &types.ExecutionPayload{ - ParentHash: payload.ParentHash, - FeeRecipient: payload.FeeRecipient, - StateRoot: payload.StateRoot, - ReceiptsRoot: payload.ReceiptsRoot, - LogsBloom: payload.LogsBloom, - Random: payload.PrevRandao, - BlockNumber: payload.BlockNumber, - GasLimit: payload.GasLimit, - GasUsed: payload.GasUsed, - Timestamp: payload.Timestamp, - ExtraData: payload.ExtraData, - BaseFeePerGas: payload.BaseFeePerGas, - BlockHash: payload.BlockHash, - Transactions: txs, - Withdrawals: withdrawals, - BlobGasUsed: payload.BlobGasUsed, - ExcessBlobGas: payload.ExcessBlobGas, + ParentHash: payload.ParentHash, + FeeRecipient: payload.FeeRecipient, + StateRoot: payload.StateRoot, + ReceiptsRoot: payload.ReceiptsRoot, + LogsBloom: payload.LogsBloom, + Random: payload.PrevRandao, + BlockNumber: payload.BlockNumber, + GasLimit: payload.GasLimit, + GasUsed: payload.GasUsed, + Timestamp: payload.Timestamp, + ExtraData: payload.ExtraData, + BaseFeePerGas: payload.BaseFeePerGas, + BlockHash: payload.BlockHash, + TransactionsCount: len(payload.Transactions), 
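+			// Only the transaction count is persisted for the execution payload now;
+			// the per-transaction decoding (hash, sender, fees, blob hashes) that used
+			// to populate a Transactions slice has been removed from this client.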
+ Withdrawals: withdrawals, + BlobGasUsed: payload.BlobGasUsed, + ExcessBlobGas: payload.ExcessBlobGas, } } @@ -933,6 +952,7 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB for i, attestation := range parsedBlock.Message.Body.Attestations { a := &types.Attestation{ AggregationBits: attestation.AggregationBits, + CommitteeBits: attestation.CommitteeBits, Attesters: []uint64{}, Data: &types.AttestationData{ Slot: attestation.Data.Slot, @@ -951,24 +971,52 @@ func (lc *LighthouseClient) blockFromResponse(parsedHeaders *constypes.StandardB } aggregationBits := bitfield.Bitlist(a.AggregationBits) + committeeBits := bitfield.Bitvector64(a.CommitteeBits) + assignments, err := lc.GetEpochAssignments(a.Data.Slot / utils.Config.Chain.ClConfig.SlotsPerEpoch) if err != nil { return nil, fmt.Errorf("error receiving epoch assignment for epoch %v: %w", a.Data.Slot/utils.Config.Chain.ClConfig.SlotsPerEpoch, err) } + if len(a.CommitteeBits) == 0 { + for i := uint64(0); i < aggregationBits.Len(); i++ { + if aggregationBits.BitAt(i) { + validator, found := assignments.AttestorAssignments[utils.FormatAttestorAssignmentKey(a.Data.Slot, uint64(a.Data.CommitteeIndex), i)] + if !found { // This should never happen! + validator = 0 + log.Fatal(fmt.Errorf("error retrieving assigned validator for attestation %v of block %v for slot %v committee index %v member index %v", i, block.Slot, a.Data.Slot, a.Data.CommitteeIndex, i), "", 0) + } + a.Attesters = append(a.Attesters, validator) - for i := uint64(0); i < aggregationBits.Len(); i++ { - if aggregationBits.BitAt(i) { - validator, found := assignments.AttestorAssignments[utils.FormatAttestorAssignmentKey(a.Data.Slot, uint64(a.Data.CommitteeIndex), i)] - if !found { // This should never happen! - validator = 0 - log.Fatal(fmt.Errorf("error retrieving assigned validator for attestation %v of block %v for slot %v committee index %v member index %v", i, block.Slot, a.Data.Slot, a.Data.CommitteeIndex, i), "", 0) + if block.AttestationDuties[types.ValidatorIndex(validator)] == nil { + block.AttestationDuties[types.ValidatorIndex(validator)] = []types.Slot{types.Slot(a.Data.Slot)} + } else { + block.AttestationDuties[types.ValidatorIndex(validator)] = append(block.AttestationDuties[types.ValidatorIndex(validator)], types.Slot(a.Data.Slot)) + } } - a.Attesters = append(a.Attesters, validator) - - if block.AttestationDuties[types.ValidatorIndex(validator)] == nil { - block.AttestationDuties[types.ValidatorIndex(validator)] = []types.Slot{types.Slot(a.Data.Slot)} - } else { - block.AttestationDuties[types.ValidatorIndex(validator)] = append(block.AttestationDuties[types.ValidatorIndex(validator)], types.Slot(a.Data.Slot)) + } + } else { + attestationsBitsOffset := uint64(0) + for i := uint64(0); i < committeeBits.Len(); i++ { + if committeeBits.BitAt(i) { + committeeLength := assignments.AttestationCommitteeLengths[fmt.Sprintf("%d-%d", a.Data.Slot, i)] + for j := uint64(0); j < committeeLength; j++ { + if aggregationBits.BitAt(attestationsBitsOffset + j) { + validator, found := assignments.AttestorAssignments[utils.FormatAttestorAssignmentKey(a.Data.Slot, i, j)] + if !found { // This should never happen! 
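+							// An unknown attester here means the committee lengths returned by
+							// GetEpochAssignments do not line up with the attestation's aggregation
+							// bits, so abort rather than attribute the vote to the wrong validator.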
+ validator = 0 + log.Fatal(fmt.Errorf("error retrieving assigned validator for attestation %v of block %v for slot %v committee index %v member index %v", i, block.Slot, a.Data.Slot, a.Data.CommitteeIndex, i), "", 0) + } + //log.Infof("attestation %v of block %v for slot %v committee index %v member index %v validator %v", i, block.Slot, a.Data.Slot, i, attestationsBitsOffset+j, validator) + a.Attesters = append(a.Attesters, validator) + + if block.AttestationDuties[types.ValidatorIndex(validator)] == nil { + block.AttestationDuties[types.ValidatorIndex(validator)] = []types.Slot{types.Slot(a.Data.Slot)} + } else { + block.AttestationDuties[types.ValidatorIndex(validator)] = append(block.AttestationDuties[types.ValidatorIndex(validator)], types.Slot(a.Data.Slot)) + } + } + } + attestationsBitsOffset += committeeLength } } } @@ -1128,3 +1176,7 @@ type ExecutionPayload struct { BlobGasUsed constypes.Uint64Str `json:"blob_gas_used"` ExcessBlobGas constypes.Uint64Str `json:"excess_blob_gas"` } + +func (lc *LighthouseClient) GetStandardBeaconState(stateID any) (*constypes.StandardBeaconStateResponse, error) { + return lc.cl.GetState(stateID) +} diff --git a/backend/pkg/commons/types/chain.go b/backend/pkg/commons/types/chain.go index 36321267e..1a211abc0 100644 --- a/backend/pkg/commons/types/chain.go +++ b/backend/pkg/commons/types/chain.go @@ -28,6 +28,8 @@ type ClChainConfig struct { CappellaForkEpoch uint64 `yaml:"CAPELLA_FORK_EPOCH"` DenebForkVersion string `yaml:"DENEB_FORK_VERSION"` DenebForkEpoch uint64 `yaml:"DENEB_FORK_EPOCH"` + ElectraForkVersion string `yaml:"ELECTRA_FORK_VERSION"` + ElectraForkEpoch uint64 `yaml:"ELECTRA_FORK_EPOCH"` Eip6110ForkVersion string `yaml:"EIP6110_FORK_VERSION"` Eip6110ForkEpoch uint64 `yaml:"EIP6110_FORK_EPOCH"` Eip7002ForkVersion string `yaml:"EIP7002_FORK_VERSION"` diff --git a/backend/pkg/commons/types/exporter.go b/backend/pkg/commons/types/exporter.go index 197638472..ea3e41727 100644 --- a/backend/pkg/commons/types/exporter.go +++ b/backend/pkg/commons/types/exporter.go @@ -130,11 +130,47 @@ type Block struct { ExcessBlobGas uint64 BlobKZGCommitments [][]byte BlobKZGProofs [][]byte + ExecutionRequests *ExecutionRequests + AttestationDuties map[ValidatorIndex][]Slot SyncDuties map[ValidatorIndex]bool Finalized bool EpochAssignments *EpochAssignments Validators []*Validator + QueuedExecutionRequest *ExecutionRequests + ProcessedExecutionRequests *ExecutionRequests +} + +type ExecutionRequests struct { + Deposits []*DepositExecutionRequest + Withdrawals []*WithdrawalExecutionRequest + Consolidations []*ConsolidationExecutionRequest +} + +type DepositExecutionRequest struct { + Pubkey []byte + WithdrawalCredentials []byte + Amount uint64 + Signature []byte + Index uint64 + SlotFromState uint64 +} + +type WithdrawalExecutionRequest struct { + SourceAddress []byte + ValidatorPubkey []byte + ValidatorIndex uint64 + Amount uint64 + WithdrawableEpoch uint64 +} + +type ConsolidationExecutionRequest struct { + SourceAddress []byte + SourcePubkey []byte + SourceIndex uint64 + TargetPubkey []byte + TargetIndex uint64 + AmountConsolidated uint64 } type SignedBLSToExecutionChange struct { @@ -170,23 +206,23 @@ type Transaction struct { } type ExecutionPayload struct { - ParentHash []byte - FeeRecipient []byte - StateRoot []byte - ReceiptsRoot []byte - LogsBloom []byte - Random []byte - BlockNumber uint64 - GasLimit uint64 - GasUsed uint64 - Timestamp uint64 - ExtraData []byte - BaseFeePerGas uint64 - BlockHash []byte - Transactions []*Transaction - Withdrawals 
[]*Withdrawals - BlobGasUsed uint64 - ExcessBlobGas uint64 + ParentHash []byte + FeeRecipient []byte + StateRoot []byte + ReceiptsRoot []byte + LogsBloom []byte + Random []byte + BlockNumber uint64 + GasLimit uint64 + GasUsed uint64 + Timestamp uint64 + ExtraData []byte + BaseFeePerGas uint64 + BlockHash []byte + TransactionsCount int + Withdrawals []*Withdrawals + BlobGasUsed uint64 + ExcessBlobGas uint64 } type Withdrawals struct { @@ -243,6 +279,7 @@ type IndexedAttestation struct { // Attestation is a struct to hold attestation header data type Attestation struct { AggregationBits []byte + CommitteeBits []byte Attesters []uint64 Data *AttestationData Signature []byte @@ -297,9 +334,10 @@ type CanonBlock struct { // EpochAssignments is a struct to hold epoch assignment data type EpochAssignments struct { - ProposerAssignments map[uint64]uint64 - AttestorAssignments map[string]uint64 - SyncAssignments []uint64 + ProposerAssignments map[uint64]uint64 + AttestorAssignments map[string]uint64 + AttestationCommitteeLengths map[string]uint64 + SyncAssignments []uint64 } // ELDeposit is a struct to hold execution layer deposit data diff --git a/backend/pkg/commons/utils/config.go b/backend/pkg/commons/utils/config.go index 134f926fc..0913e619e 100644 --- a/backend/pkg/commons/utils/config.go +++ b/backend/pkg/commons/utils/config.go @@ -291,6 +291,10 @@ func setELConfig(cfg *types.Config) error { err = yaml.Unmarshal([]byte(config.GnosisChainYml), &minimalCfg) case "holesky": err = yaml.Unmarshal([]byte(config.HoleskyChainYml), &minimalCfg) + case "mekong": + err = yaml.Unmarshal([]byte(config.MekongChainYml), &minimalCfg) + case "pectra-devnet-5": + err = yaml.Unmarshal([]byte(config.PectraDevnet5ChainYml), &minimalCfg) default: return fmt.Errorf("tried to set known chain-config, but unknown chain-name: %v (path: %v)", cfg.Chain.Name, cfg.Chain.ElConfigPath) } @@ -397,6 +401,8 @@ func setCLConfig(cfg *types.Config) error { CappellaForkEpoch: *jr.Data.CapellaForkEpoch, DenebForkVersion: jr.Data.DenebForkVersion, DenebForkEpoch: *jr.Data.DenebForkEpoch, + ElectraForkVersion: jr.Data.ElectraForkVersion, + ElectraForkEpoch: *jr.Data.ElectraForkEpoch, SecondsPerSlot: uint64(jr.Data.SecondsPerSlot), SecondsPerEth1Block: uint64(jr.Data.SecondsPerEth1Block), MinValidatorWithdrawabilityDelay: uint64(jr.Data.MinValidatorWithdrawabilityDelay), diff --git a/backend/pkg/commons/utils/utils.go b/backend/pkg/commons/utils/utils.go index 20c45e829..d931cb570 100644 --- a/backend/pkg/commons/utils/utils.go +++ b/backend/pkg/commons/utils/utils.go @@ -406,3 +406,10 @@ func FirstN(input string, n int) string { } return input[:n] } + +func Min(a, b uint64) uint64 { + if a < b { + return a + } + return b +} diff --git a/backend/pkg/consapi/client.go b/backend/pkg/consapi/client.go index 017083078..ccaca28b4 100644 --- a/backend/pkg/consapi/client.go +++ b/backend/pkg/consapi/client.go @@ -62,6 +62,9 @@ type ClientInt interface { // /eth/v1/events GetEvents(topics []types.EventTopic) chan *types.EventResponse + + // /eth/v1/debug/beacon/states/%v + GetState(stateID any) (*types.StandardBeaconStateResponse, error) } type NodeClient struct { Endpoint string diff --git a/backend/pkg/consapi/client_node.go b/backend/pkg/consapi/client_node.go index a0c136d01..344b64d76 100644 --- a/backend/pkg/consapi/client_node.go +++ b/backend/pkg/consapi/client_node.go @@ -178,3 +178,8 @@ func (r *NodeClient) GetEvents(topics []types.EventTopic) chan *types.EventRespo }() return responseCh } + +func (r *NodeClient) GetState(stateID 
any) (*types.StandardBeaconStateResponse, error) { + requestURL := fmt.Sprintf("%s/eth/v1/debug/beacon/states/%v", r.Endpoint, stateID) + return network.Get[types.StandardBeaconStateResponse](r.httpClient, requestURL) +} diff --git a/backend/pkg/consapi/types/slot.go b/backend/pkg/consapi/types/slot.go index cfc72acfa..c9c5d41c5 100644 --- a/backend/pkg/consapi/types/slot.go +++ b/backend/pkg/consapi/types/slot.go @@ -1,6 +1,8 @@ package types -import "github.com/ethereum/go-ethereum/common/hexutil" +import ( + "github.com/ethereum/go-ethereum/common/hexutil" +) // /eth/v2/beacon/blocks/{block_id} type StandardBeaconSlotResponse struct { @@ -37,11 +39,38 @@ type AnySignedBlock struct { // present only after deneb BlobKZGCommitments []hexutil.Bytes `json:"blob_kzg_commitments"` + + // present only after pectra + ExecutionRequests struct { + Deposits []DepositExecutionRequest `json:"deposits"` + Withdrawals []WithdrawalExecutionRequest `json:"withdrawals"` + Consolidations []ConsolidationExecutionRequest `json:"consolidations"` + } `json:"execution_requests"` } `json:"body"` } `json:"message"` Signature hexutil.Bytes `json:"signature"` } +type DepositExecutionRequest struct { + Pubkey hexutil.Bytes `json:"pubkey"` + WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"` + Amount uint64 `json:"amount,string"` + Signature hexutil.Bytes `json:"signature"` + Index uint64 `json:"index,string"` +} + +type WithdrawalExecutionRequest struct { + SourceAddress hexutil.Bytes `json:"source_address"` + ValidatorPubkey hexutil.Bytes `json:"validator_pubkey"` + Amount uint64 `json:"amount,string"` +} + +type ConsolidationExecutionRequest struct { + SourceAddress hexutil.Bytes `json:"source_address"` + SourcePubkey hexutil.Bytes `json:"source_pubkey"` + TargetPubkey hexutil.Bytes `json:"target_pubkey"` +} + type ProposerSlashing struct { SignedHeader1 struct { Message struct { @@ -120,6 +149,7 @@ func (a *AttesterSlashing) GetSlashedIndices() []uint64 { type Attestation struct { AggregationBits hexutil.Bytes `json:"aggregation_bits"` + CommitteeBits hexutil.Bytes `json:"committee_bits"` Signature hexutil.Bytes `json:"signature"` Data struct { Slot uint64 `json:"slot,string"` diff --git a/backend/pkg/consapi/types/spec.go b/backend/pkg/consapi/types/spec.go index 181fd7b65..fe6e06f4b 100644 --- a/backend/pkg/consapi/types/spec.go +++ b/backend/pkg/consapi/types/spec.go @@ -23,6 +23,8 @@ type StandardSpec struct { CapellaForkEpoch *uint64 `json:"CAPELLA_FORK_EPOCH,string"` DenebForkVersion string `json:"DENEB_FORK_VERSION"` DenebForkEpoch *uint64 `json:"DENEB_FORK_EPOCH,string"` + ElectraForkVersion string `json:"ELECTRA_FORK_VERSION"` + ElectraForkEpoch *uint64 `json:"ELECTRA_FORK_EPOCH,string"` SecondsPerSlot int64 `json:"SECONDS_PER_SLOT,string"` SecondsPerEth1Block int64 `json:"SECONDS_PER_ETH1_BLOCK,string"` MinValidatorWithdrawabilityDelay int64 `json:"MIN_VALIDATOR_WITHDRAWABILITY_DELAY,string"` diff --git a/backend/pkg/consapi/types/state.go b/backend/pkg/consapi/types/state.go new file mode 100644 index 000000000..868d91166 --- /dev/null +++ b/backend/pkg/consapi/types/state.go @@ -0,0 +1,37 @@ +package types + +import "github.com/ethereum/go-ethereum/common/hexutil" + +type StandardBeaconStateResponse struct { + ExecutionOptimistic bool `json:"execution_optimistic"` + Finalized bool `json:"finalized"` + Data struct { + Balances []Uint64Str `json:"balances"` + Validators []struct { + Pubkey hexutil.Bytes `json:"pubkey"` + WithdrawalCredentials hexutil.Bytes 
`json:"withdrawal_credentials"` + EffectiveBalance uint64 `json:"effective_balance,string"` + Slashed bool `json:"slashed"` + ActivationEligibilityEpoch uint64 `json:"activation_eligibility_epoch,string"` + ActivationEpoch uint64 `json:"activation_epoch,string"` + ExitEpoch uint64 `json:"exit_epoch,string"` + WithdrawalbleEpoch uint64 `json:"withdrawable_epoch,string"` + } `json:"validators"` + PendingConsolidations []struct { + SourceIndex uint64 `json:"source_index,string"` + TargetIndex uint64 `json:"target_index,string"` + } `json:"pending_consolidations"` + PendingPartialWithdrawals []struct { + ValidatorIndex uint64 `json:"validator_index,string"` + Amount uint64 `json:"amount,string"` + WithdrawableEpopch uint64 `json:"withdrawable_epoch,string"` + } `json:"pending_partial_withdrawals"` + PendingDeposits []struct { + Pubkey hexutil.Bytes `json:"pubkey"` + WithdrawalCredentials hexutil.Bytes `json:"withdrawal_credentials"` + Amount uint64 `json:"amount,string"` + Signature hexutil.Bytes `json:"signature"` + Slot uint64 `json:"slot,string"` + } `json:"pending_deposits"` + } `json:"data"` +} diff --git a/backend/pkg/consapi/types/validator.go b/backend/pkg/consapi/types/validator.go index 4a2cd0674..ad27c38e9 100644 --- a/backend/pkg/consapi/types/validator.go +++ b/backend/pkg/consapi/types/validator.go @@ -1,6 +1,8 @@ package types -import "github.com/ethereum/go-ethereum/common/hexutil" +import ( + "github.com/ethereum/go-ethereum/common/hexutil" +) const ( // Node statuses diff --git a/backend/pkg/exporter/db/db.go b/backend/pkg/exporter/db/db.go index 033d652ea..bd909b387 100644 --- a/backend/pkg/exporter/db/db.go +++ b/backend/pkg/exporter/db/db.go @@ -59,7 +59,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1) ON CONFLICT (block_hash) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtExecutionPayload: %w", err) } defer stmtExecutionPayload.Close() @@ -68,7 +68,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37, $38, $39, $40) ON CONFLICT (slot, blockroot) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtBlock: %w", err) } defer stmtBlock.Close() @@ -77,7 +77,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_slot, block_root, withdrawalindex) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtWithdrawals: %w", err) } defer stmtWithdrawals.Close() @@ -86,7 +86,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_slot, block_root, validatorindex) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtBLSChange: %w", err) } defer stmtBLSChange.Close() @@ -95,7 +95,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtProposerSlashing: %w", err) } defer stmtProposerSlashing.Close() @@ -104,16 +104,16 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, 
$7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21) ON CONFLICT (block_slot, block_index) DO UPDATE SET attestation1_indices = excluded.attestation1_indices, attestation2_indices = excluded.attestation2_indices`) if err != nil { - return err + return fmt.Errorf("error preparing stmtAttesterSlashing: %w", err) } defer stmtAttesterSlashing.Close() stmtAttestations, err := tx.Prepare(` - INSERT INTO blocks_attestations (block_slot, block_index, block_root, aggregationbits, validators, signature, slot, committeeindex, beaconblockroot, source_epoch, source_root, target_epoch, target_root) - VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + INSERT INTO blocks_attestations (block_slot, block_index, block_root, aggregationbits, validators, signature, slot, committeeindex, beaconblockroot, source_epoch, source_root, target_epoch, target_root, committeebits) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtAttestations: %w", err) } defer stmtAttestations.Close() @@ -122,7 +122,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtDeposits: %w", err) } defer stmtDeposits.Close() @@ -131,7 +131,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_root, index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtBlobs: %w", err) } defer stmtBlobs.Close() @@ -140,7 +140,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (block_slot, block_index) DO NOTHING`) if err != nil { - return err + return fmt.Errorf("error preparing stmtVoluntaryExits: %w", err) } defer stmtVoluntaryExits.Close() @@ -149,7 +149,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo VALUES ($1, $2, $3, $4) ON CONFLICT (epoch, validatorindex, proposerslot) DO UPDATE SET status = excluded.status`) if err != nil { - return err + return fmt.Errorf("error preparing stmtProposalAssignments: %w", err) } defer stmtProposalAssignments.Close() @@ -211,7 +211,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo ExtraData []byte BaseFeePerGas *uint64 BlockHash []byte - TxCount *int64 + TxCount *int WithdrawalCount *int64 BlobGasUsed *uint64 ExcessBlobGas *uint64 @@ -221,7 +221,6 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo execData := new(exectionPayloadData) if b.ExecutionPayload != nil { - txCount := int64(len(b.ExecutionPayload.Transactions)) withdrawalCount := int64(len(b.ExecutionPayload.Withdrawals)) blobTxCount := int64(len(b.BlobKZGCommitments)) execData = &exectionPayloadData{ @@ -238,7 +237,7 @@ func saveBlocks(blocks map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo ExtraData: b.ExecutionPayload.ExtraData, BaseFeePerGas: &b.ExecutionPayload.BaseFeePerGas, BlockHash: b.ExecutionPayload.BlockHash, - TxCount: &txCount, + TxCount: &b.ExecutionPayload.TransactionsCount, WithdrawalCount: &withdrawalCount, BlobGasUsed: &b.ExecutionPayload.BlobGasUsed, ExcessBlobGas: &b.ExecutionPayload.ExcessBlobGas, @@ -328,8 +327,9 @@ func saveBlocks(blocks 
map[uint64]map[string]*types.Block, tx *sqlx.Tx, forceSlo return fmt.Errorf("error executing stmtAttesterSlashing for block %v index %v: %w", b.Slot, i, err) } } + for i, a := range b.Attestations { - _, err = stmtAttestations.Exec(b.Slot, i, b.BlockRoot, a.AggregationBits, pq.Array(a.Attesters), a.Signature, a.Data.Slot, a.Data.CommitteeIndex, a.Data.BeaconBlockRoot, a.Data.Source.Epoch, a.Data.Source.Root, a.Data.Target.Epoch, a.Data.Target.Root) + _, err = stmtAttestations.Exec(b.Slot, i, b.BlockRoot, a.AggregationBits, pq.Array(a.Attesters), a.Signature, a.Data.Slot, a.Data.CommitteeIndex, a.Data.BeaconBlockRoot, a.Data.Source.Epoch, a.Data.Source.Root, a.Data.Target.Epoch, a.Data.Target.Root, a.CommitteeBits) if err != nil { return fmt.Errorf("error executing stmtAttestations for block %v index %v: %w", b.Slot, i, err) } diff --git a/backend/pkg/exporter/modules/execution_rewards_finalizer.go b/backend/pkg/exporter/modules/execution_rewards_finalizer.go index 3659648cc..64f58c5b0 100644 --- a/backend/pkg/exporter/modules/execution_rewards_finalizer.go +++ b/backend/pkg/exporter/modules/execution_rewards_finalizer.go @@ -80,7 +80,7 @@ func (d *executionRewardsFinalizer) maintainTable() (err error) { var latestFinalizedSlot int64 err = db.ReaderDb.Get(&latestFinalizedSlot, ` SELECT - max(slot) + COALESCE(max(slot), 0) FROM blocks WHERE diff --git a/backend/pkg/exporter/modules/slot_exporter.go b/backend/pkg/exporter/modules/slot_exporter.go index da431b5e1..03b337444 100644 --- a/backend/pkg/exporter/modules/slot_exporter.go +++ b/backend/pkg/exporter/modules/slot_exporter.go @@ -101,11 +101,13 @@ func (d *slotExporterData) OnHead(_ *constypes.StandardEventHeadResponse) (err e defer utils.Rollback(tx) if d.FirstRun { + log.Infof("performing first run consistency checks") // get all slots we currently have in the database dbSlots, err := db.GetAllSlots(tx) if err != nil { return fmt.Errorf("error retrieving all db slots: %w", err) } + log.Info("retrieved all exported slots from the database") if len(dbSlots) > 0 { if dbSlots[0] != 0 { @@ -122,6 +124,7 @@ func (d *slotExporterData) OnHead(_ *constypes.StandardEventHeadResponse) (err e } if len(dbSlots) > 1 { + log.Info("performing gap checks") // export any gaps we might have (for whatever reason) for slotIndex := 1; slotIndex < len(dbSlots); slotIndex++ { previousSlot := dbSlots[slotIndex-1] @@ -170,7 +173,7 @@ func (d *slotExporterData) OnHead(_ *constypes.StandardEventHeadResponse) (err e slotsExported++ // in case of large export runs, export at most 10 epochs per tx - if slotsExported == int(utils.Config.Chain.ClConfig.SlotsPerEpoch)*10 { + if slotsExported == int(utils.Config.Chain.ClConfig.SlotsPerEpoch)*1 { // TODO: change this back to 10 err := tx.Commit() if err != nil { @@ -354,8 +357,43 @@ func ExportSlot(client rpc.Client, slot uint64, isHeadEpoch bool, tx *sqlx.Tx) e if block.EpochAssignments != nil { // export the epoch assignments as they are included in the first slot of an epoch epoch := utils.EpochOfSlot(block.Slot) - log.Infof("exporting duties & balances for epoch %v", epoch) + log.Infof("checking that events have been loaded for epoch %v", epoch) + for ; ; time.Sleep(time.Second) { + exported, err := db.HasEventsForEpoch(epoch) + if err != nil { + return fmt.Errorf("error retrieving events for epoch %v: %w", epoch, err) + } + if exported { + break + } + log.Infof("events for epoch %v have not been loaded yet, waiting", epoch) + } + log.Infof("events for epoch %v have been loaded, transforming consolidations & 
deposits", epoch) + + if epoch > 0 { + firstSlot := (epoch - 1) * utils.Config.Chain.ClConfig.SlotsPerEpoch + lastSlot := (epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch) - 1 + + switchToCompoundingRequestsProcessed, err := db.TransformSwitchToCompoundingRequests(firstSlot, lastSlot, tx) + if err != nil { + return fmt.Errorf("error transforming consolidation requests for epoch %v: %w", epoch, err) + } + log.Infof("transformed switch to compounding requests for epoch %v, processed %d requests", epoch, switchToCompoundingRequestsProcessed) + consolidationRequestsProcessed, err := db.TransformConsolidationRequests(firstSlot, lastSlot, tx) + if err != nil { + return fmt.Errorf("error transforming consolidation requests for epoch %v: %w", epoch, err) + } + log.Infof("transformed consolidations for epoch %v, processed %d requests", epoch, consolidationRequestsProcessed) + + depositRequestsProcessed, err := db.TransformDepositRequests(firstSlot, lastSlot, tx) + if err != nil { + return fmt.Errorf("error transforming deposit requests for epoch %v: %w", epoch, err) + } + log.Infof("transformed deposits for epoch %v, processed %d requests", epoch, depositRequestsProcessed) + } + + log.Infof("exporting duties & balances for epoch %v", epoch) // prepare the duties for export to bigtable syncDutiesEpoch := make(map[types.Slot]map[types.ValidatorIndex]bool) attDutiesEpoch := make(map[types.Slot]map[types.ValidatorIndex][]types.Slot) @@ -478,8 +516,9 @@ func ExportSlot(client rpc.Client, slot uint64, isHeadEpoch bool, tx *sqlx.Tx) e } return nil }) + // if we are exporting the head epoch, update the validator db table - if isHeadEpoch { + if isHeadEpoch || epoch%5 == 0 { // this function sets exports the validator status into the db // and also updates the status field in the validators array err := edb.SaveValidators(epoch, block.Validators, client, 10000, tx) diff --git a/frontend/types/network.ts b/frontend/types/network.ts index c5e0d651e..9f9f59700 100644 --- a/frontend/types/network.ts +++ b/frontend/types/network.ts @@ -16,7 +16,7 @@ const ChainIDs = { Gnosis: 100, Holesky: 17000, - + Pectra_Devnet_5: 7088110746, Sepolia: 11155111, } as const @@ -112,6 +112,26 @@ export const ChainInfo: Record = { slotsPerEpoch: 32, timeStampSlot0: 1695902400, }, + [ChainIDs.Pectra_Devnet_5]: { + clCurrency: 'ETH', + description: 'Devnet', + elCurrency: 'ETH', + family: ChainFamily.Ethereum, + mainCurrency: 'ETH', + mainNet: ChainIDs.Ethereum, + name: 'Ethereum Pectra Devnet 5', + nameParts: [ + 'Ethereum', + 'Pectra', + 'Devnet', + '5', + ], + priority: 41, + secondsPerSlot: 12, + shortName: 'Pectra', + slotsPerEpoch: 32, + timeStampSlot0: 1737034260, + }, [ChainIDs.Sepolia]: { clCurrency: 'ETH', description: 'Testnet',