Bids 2530/missing block0 (#2684)
* (BIDS-2530) Fix previous arrow on block and slot page

* (BIDS-2530) Make slot page show block 0 (if available)

* (BIDS-2530) Enable GetBlocksDescending to get block 0

* (BIDS-2530) Enable getEth1BlocksTableData to get block 0

* (BIDS-2530) Improve getEth1BlockAndNext for block 0

* (BIDS-2530) Clamp limit in GetBlocksDescending

* (BIDS-2530) Improve comment for GetBlocksDescending

* (BIDS-2530) Enable indexMissingBlocks to work with block 0

* (BIDS-2530) Improve handling for block 0

* (BIDS-2530) Stop exporting all functions in misc tool

* (BIDS-2530) Fix comment

* (BIDS-2530) Add special handling on block page for genesis block 0

* (BIDS-2530) Show genesis slot badge properly

* (BIDS-2530) Fix slot page for pre merge slots

* (BIDS-2530) Improve function/variable naming

* (BIDS-2530) Add comment

* (BIDS-2530) Enable search to find block 0 and slots 0 to 9

* (BIDS-2530) Remove a space from a comment (yes, really)

* (BIDS-2530) Enable index-missing-blocks to index a single block

* (BIDS-2530) Improve comment

* (BIDS-2530) Add comment

* (BIDS-2530) Fix slot 0 handling and rename variables

* (BIDS-2530) Reduce duplicated code

* (BIDS-2530) Use TimeToSlot

* (BIDS-2530) Improve IsPoSBlock0

* (BIDS-2530) Improve function name

* (BIDS-2530) Reduce indentation

* (BIDS-2530) Fix start for blocks table

* (BIDS-2530) Reduce code complexity and add comment

* (BIDS-2530) Adapt time handling in IsPoSBlock0

* (BIDS-2530) Reduce code complexity

* (BIDS-2530) Fix GetFullBlocksDescending for when block 0 is queued

* (BIDS-2530) Fix typo

* (BIDS-2530) Improve rowHandler

* (BIDS-2530) Improve block 0 in GetBlocksDescending

* (BIDS-2530) Harmonize block special handling

* (BIDS-2530) Improve comment for getEth1BlockAndNext

* (BIDS-2530) Remove debug output
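
Several of the bullets above (enabling GetBlocksDescending to reach block 0 and clamping its limit) come down to one piece of unsigned arithmetic: a descending range must not underflow below block 0, so the requested count has to be capped at block+1. The helper below is a minimal standalone sketch of that idea only, not code from this commit; the name clampDescendingRange and its signature are purely illustrative.

package main

import "fmt"

// clampDescendingRange is a hypothetical helper (not part of this commit): for a
// query that walks downward from `block` and returns at most `limit` blocks, it
// caps the limit at block+1 (only blocks block..0 exist) and computes the lowest
// block of the range without underflowing the unsigned subtraction.
func clampDescendingRange(block, limit uint64) (stopBlock, clampedLimit uint64) {
	if limit == 0 {
		return block, 0
	}
	if limit > block+1 {
		limit = block + 1
	}
	return block + 1 - limit, limit
}

func main() {
	// asking for 100 blocks ending at block 5 must stop at (and include) block 0
	stop, limit := clampDescendingRange(5, 100)
	fmt.Println(stop, limit) // prints: 0 6
}
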
D13ce authored Nov 24, 2023
1 parent 6e173bc commit 91b2b05
Showing 13 changed files with 293 additions and 245 deletions.
143 changes: 79 additions & 64 deletions cmd/misc/main.go
@@ -12,6 +12,7 @@ import (
"eth2-exporter/utils"
"eth2-exporter/version"
"fmt"
"math"
"math/big"
"net/http"
"strconv"
@@ -149,12 +150,12 @@ func main() {

switch opts.Command {
case "nameValidatorsByRanges":
err := NameValidatorsByRanges(opts.ValidatorNameRanges)
err := nameValidatorsByRanges(opts.ValidatorNameRanges)
if err != nil {
logrus.WithError(err).Fatal("error naming validators by ranges")
}
case "updateAPIKey":
err := UpdateAPIKey(opts.User)
err := updateAPIKey(opts.User)
if err != nil {
logrus.WithError(err).Fatal("error updating API key")
}
@@ -243,17 +244,17 @@ func main() {
}
}
case "debug-rewards":
CompareRewards(opts.StartDay, opts.EndDay, opts.Validator, bt)
compareRewards(opts.StartDay, opts.EndDay, opts.Validator, bt)
case "debug-blocks":
err = DebugBlocks()
err = debugBlocks()
case "clear-bigtable":
ClearBigtable(opts.Table, opts.Family, opts.Key, opts.DryRun, bt)
clearBigtable(opts.Table, opts.Family, opts.Key, opts.DryRun, bt)
case "index-old-eth1-blocks":
IndexOldEth1Blocks(opts.StartBlock, opts.EndBlock, opts.BatchSize, opts.DataConcurrency, opts.Transformers, bt, erigonClient)
indexOldEth1Blocks(opts.StartBlock, opts.EndBlock, opts.BatchSize, opts.DataConcurrency, opts.Transformers, bt, erigonClient)
case "update-aggregation-bits":
updateAggreationBits(rpcClient, opts.StartEpoch, opts.EndEpoch, opts.DataConcurrency)
case "update-block-finalization-sequentially":
err = UpdateBlockFinalizationSequentially()
err = updateBlockFinalizationSequentially()
case "historic-prices-export":
exportHistoricPrices(opts.StartDay, opts.EndDay)
case "index-missing-blocks":
@@ -454,7 +455,7 @@ func fixExecTransactionsCount() error {
return tx.Commit()
}

func UpdateBlockFinalizationSequentially() error {
func updateBlockFinalizationSequentially() error {
var err error

var maxSlot uint64
Expand Down Expand Up @@ -483,7 +484,7 @@ func UpdateBlockFinalizationSequentially() error {
break
}

logrus.WithFields(logrus.Fields{"minNonFinalizedSlot": minNonFinalizedSlot}).Infof("UpdateBlockFinalizationSequentially")
logrus.WithFields(logrus.Fields{"minNonFinalizedSlot": minNonFinalizedSlot}).Infof("updateBlockFinalizationSequentially")
nextStartEpoch := minNonFinalizedSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch
stepSize := uint64(100)
for ; ; time.Sleep(time.Millisecond * 50) {
@@ -512,7 +513,7 @@ func UpdateBlockFinalizationSequentially() error {
}
}

func DebugBlocks() error {
func debugBlocks() error {
elClient, err := rpc.NewErigonClient(utils.Config.Eth1ErigonEndpoint)
if err != nil {
return err
@@ -590,7 +591,7 @@ func DebugBlocks() error {
return nil
}

func NameValidatorsByRanges(rangesUrl string) error {
func nameValidatorsByRanges(rangesUrl string) error {
ranges := struct {
Ranges map[string]string `json:"ranges"`
}{}
@@ -824,7 +825,7 @@ func updateAggreationBits(rpcClient *rpc.LighthouseClient, startEpoch uint64, en
}

// Updates a user's API key
func UpdateAPIKey(user uint64) error {
func updateAPIKey(user uint64) error {
type User struct {
PHash string `db:"password"`
Email string `db:"email"`
@@ -878,7 +879,7 @@ func UpdateAPIKey(user uint64) error {
}

// Debugging function to compare Rewards from the Statistic Table with the ones from the Big Table
func CompareRewards(dayStart uint64, dayEnd uint64, validator uint64, bt *db.Bigtable) {
func compareRewards(dayStart uint64, dayEnd uint64, validator uint64, bt *db.Bigtable) {

for day := dayStart; day <= dayEnd; day++ {
startEpoch := day * utils.EpochsPerDay()
@@ -908,7 +909,7 @@ func CompareRewards(dayStart uint64, dayEnd uint64, validator uint64, bt *db.Big

}

func ClearBigtable(table string, family string, key string, dryRun bool, bt *db.Bigtable) {
func clearBigtable(table string, family string, key string, dryRun bool, bt *db.Bigtable) {

if !dryRun {
confirmation := utils.CmdPrompt(fmt.Sprintf("Are you sure you want to delete all big table entries starting with [%v] for family [%v]?", key, family))
@@ -939,10 +940,12 @@ func ClearBigtable(table string, family string, key string, dryRun bool, bt *db.
logrus.Info("delete completed")
}

// Let's find blocks that are missing in bt and index them.
// Goes through the tableData table and checks what blocks in the given range from [start] to [end] are missing and exports/indexes the missing ones
//
// Both [start] and [end] are inclusive
// Pass math.MaxInt64 as [end] to export from [start] to the last block in the blocks table
func indexMissingBlocks(start uint64, end uint64, bt *db.Bigtable, client *rpc.ErigonClient) {

if end == 0 {
if end == math.MaxInt64 {
lastBlockFromBlocksTable, err := bt.GetLastBlockInBlocksTable()
if err != nil {
logrus.Errorf("error retrieving last blocks from blocks table: %v", err)
@@ -951,53 +954,70 @@ func indexMissingBlocks(start uint64, end uint64, bt *db.Bigtable, client *rpc.E
end = uint64(lastBlockFromBlocksTable)
}

errFields := map[string]interface{}{
"start": start,
"end": end}

batchSize := uint64(10000)
if start == 0 {
start = 1
}
for i := start; i < end; i += batchSize {
for from := start; from <= end; from += batchSize {
targetCount := batchSize
if i+targetCount >= end {
targetCount = end - i
if from+targetCount >= end {
targetCount = end - from + 1
}
to := i + targetCount - 1
to := from + targetCount - 1

list, err := bt.GetBlocksDescending(uint64(to), uint64(targetCount))
errFields["from"] = from
errFields["to"] = to
errFields["targetCount"] = targetCount

list, err := bt.GetBlocksDescending(to, targetCount)
if err != nil {
utils.LogError(err, "can not retrieve blocks via GetBlocksDescending from bigtable", 0)
utils.LogError(err, "error retrieving blocks from tableData", 0, errFields)
return
}
if uint64(len(list)) == targetCount {
logrus.Infof("found all blocks [%v]->[%v]", i, to)
} else {
logrus.Infof("oh no we are missing some blocks [%v]->[%v]", i, to)
blocksMap := make(map[uint64]bool)
for _, item := range list {
blocksMap[item.Number] = true

receivedLen := uint64(len(list))
if receivedLen == targetCount {
logrus.Infof("found all blocks [%v]->[%v], skipping batch", from, to)
continue
}

logrus.Infof("%v blocks are missing from [%v]->[%v]", targetCount-receivedLen, from, to)

blocksMap := make(map[uint64]bool)
for _, item := range list {
blocksMap[item.Number] = true
}

for block := from; block <= to; block++ {
if blocksMap[block] {
// block already saved, skip
continue
}
for j := uint64(i); j <= uint64(to); j++ {
if !blocksMap[j] {
logrus.Infof("block [%v] not found so we need to index it", j)
if _, err := db.BigtableClient.GetBlockFromBlocksTable(j); err != nil {
logrus.Infof("could not load [%v] from blocks table so we need to fetch it from the node and save it", j)
bc, _, err := client.GetBlock(int64(j), "parity/geth")
if err != nil {
utils.LogError(err, fmt.Sprintf("error getting block: %v from ethereum node", j), 0)
}
err = bt.SaveBlock(bc)
if err != nil {
utils.LogError(err, fmt.Sprintf("error saving block: %v ", j), 0)
}
}

IndexOldEth1Blocks(j, j, 1, 1, "all", bt, client)
logrus.Infof("block [%v] not found, will index it", block)
if _, err := db.BigtableClient.GetBlockFromBlocksTable(block); err != nil {
logrus.Infof("could not load [%v] from blocks table, will try to fetch it from the node and save it", block)

bc, _, err := client.GetBlock(int64(block), "parity/geth")
if err != nil {
utils.LogError(err, fmt.Sprintf("error getting block %v from the node", block), 0)
return
}

err = bt.SaveBlock(bc)
if err != nil {
utils.LogError(err, fmt.Sprintf("error saving block: %v ", block), 0)
return
}
}

indexOldEth1Blocks(block, block, 1, 1, "all", bt, client)
}
}
}

func IndexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, concurrency uint64, transformerFlag string, bt *db.Bigtable, client *rpc.ErigonClient) {
func indexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, concurrency uint64, transformerFlag string, bt *db.Bigtable, client *rpc.ErigonClient) {
if endBlock > 0 && endBlock < startBlock {
utils.LogError(nil, fmt.Sprintf("endBlock [%v] < startBlock [%v]", endBlock, startBlock), 0)
return
@@ -1057,20 +1077,15 @@ func IndexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, co

cache := freecache.NewCache(100 * 1024 * 1024) // 100 MB limit

if startBlock == 0 && endBlock == 0 {
utils.LogFatal(nil, "no start+end block defined", 0)
return
}

lastBlockFromBlocksTable, err := bt.GetLastBlockInBlocksTable()
if err != nil {
utils.LogError(err, "error retrieving last blocks from blocks table", 0)
return
}
to := endBlock
if endBlock == math.MaxInt64 {
lastBlockFromBlocksTable, err := bt.GetLastBlockInBlocksTable()
if err != nil {
utils.LogError(err, "error retrieving last blocks from blocks table", 0)
return
}

to := uint64(lastBlockFromBlocksTable)
if endBlock > 0 {
to = utilMath.MinU64(to, endBlock)
to = uint64(lastBlockFromBlocksTable)
}
blockCount := utilMath.MaxU64(1, batchSize)

@@ -1079,7 +1094,7 @@ func IndexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, co
toBlock := utilMath.MinU64(to, from+blockCount-1)

logrus.Infof("indexing blocks %v to %v in data table ...", from, toBlock)
err = bt.IndexEventsWithTransformers(int64(from), int64(toBlock), transforms, int64(concurrency), cache)
err := bt.IndexEventsWithTransformers(int64(from), int64(toBlock), transforms, int64(concurrency), cache)
if err != nil {
utils.LogError(err, "error indexing from bigtable", 0)
}
@@ -1088,7 +1103,7 @@ func IndexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, co
}

if importENSChanges {
if err = bt.ImportEnsUpdates(client.GetNativeClient()); err != nil {
if err := bt.ImportEnsUpdates(client.GetNativeClient()); err != nil {
utils.LogError(err, "error importing ens from events", 0)
return
}
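
The reworked loop in indexMissingBlocks above treats both [start] and [end] as inclusive and uses math.MaxInt64 as an "up to the latest block" sentinel. The snippet below is a self-contained sketch that reproduces only the batch-boundary arithmetic visible in the diff; the function name batches and the demo values are assumptions, not part of the repository.

package main

import "fmt"

// batches mirrors the boundary arithmetic of the reworked indexMissingBlocks loop:
// start and end are both inclusive, and the final batch is adjusted so it ends
// exactly at end (targetCount = end - from + 1).
func batches(start, end, batchSize uint64) [][2]uint64 {
	out := [][2]uint64{}
	for from := start; from <= end; from += batchSize {
		targetCount := batchSize
		if from+targetCount >= end {
			targetCount = end - from + 1
		}
		to := from + targetCount - 1
		out = append(out, [2]uint64{from, to})
	}
	return out
}

func main() {
	// blocks 0 through 25 in batches of 10 -> [[0 9] [10 19] [20 25]]
	fmt.Println(batches(0, 25, 10))
}
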