diff --git a/cache/tiered_cache.go b/cache/tiered_cache.go
index 815fa361b0..2bec69e849 100644
--- a/cache/tiered_cache.go
+++ b/cache/tiered_cache.go
@@ -13,6 +13,8 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+var _ TieredCacher = (*tieredCache)(nil)
+
 // Tiered cache is a cache implementation combining a
 type tieredCache struct {
 	localGoCache *freecache.Cache
@@ -31,7 +33,19 @@ type RemoteCache interface {
 	GetBool(ctx context.Context, key string) (bool, error)
 }
 
-var TieredCache *tieredCache
+type TieredCacher interface {
+	Set(key string, value interface{}, expiration time.Duration) error
+	SetString(key string, value string, expiration time.Duration) error
+	SetUint64(key string, value uint64, expiration time.Duration) error
+	SetBool(key string, value bool, expiration time.Duration) error
+
+	GetStringWithLocalTimeout(key string, localExpiration time.Duration) (string, error)
+	GetUint64WithLocalTimeout(key string, localExpiration time.Duration) (uint64, error)
+	GetBoolWithLocalTimeout(key string, localExpiration time.Duration) (bool, error)
+	GetWithLocalTimeout(key string, localExpiration time.Duration, returnValue interface{}) (interface{}, error)
+}
+
+var TieredCache TieredCacher
 
 func MustInitTieredCache(redisAddress string) {
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*30)
diff --git a/cmd/eth1indexer/main.go b/cmd/eth1indexer/main.go
index 65ff085fb7..875890e9b8 100644
--- a/cmd/eth1indexer/main.go
+++ b/cmd/eth1indexer/main.go
@@ -48,7 +48,7 @@ func main() {
 	offsetBlocks := flag.Int64("blocks.offset", 100, "Blocks offset")
 	checkBlocksGaps := flag.Bool("blocks.gaps", false, "Check for gaps in the blocks table")
 	checkBlocksGapsLookback := flag.Int("blocks.gaps.lookback", 1000000, "Lookback for gaps check of the blocks table")
-	traceMode := flag.String("blocks.tracemode", "parity/geth", "Trace mode to use, can be either 'parity', 'geth' or 'parity/geth' for both")
+	traceMode := flag.String("blocks.tracemode", "geth", "Trace mode to use, can be either 'parity', 'geth' or 'parity/geth' for both")
 	concurrencyData := flag.Int64("data.concurrency", 30, "Concurrency to use when indexing data from bigtable")
 	startData := flag.Int64("data.start", 0, "Block to start indexing")
 
@@ -187,7 +187,7 @@ func main() {
 		return
 	}
 
-	transforms := make([]func(blk *types.Eth1Block, cache *freecache.Cache) (*types.BulkMutations, *types.BulkMutations, error), 0)
+	transforms := make([]db.TransformFunc, 0)
 	transforms = append(transforms,
 		bt.TransformBlock,
 		bt.TransformTx,
diff --git a/cmd/misc/main.go b/cmd/misc/main.go
index d409434f30..fff12ecb58 100644
--- a/cmd/misc/main.go
+++ b/cmd/misc/main.go
@@ -3,10 +3,10 @@ package main
 import (
 	"bytes"
 	"context"
-	"database/sql"
 	"encoding/base64"
 	"encoding/json"
+	"flag"
 	"fmt"
 	"math"
 	"math/big"
 
@@ -17,6 +17,7 @@ import (
 	"time"
 
 	"firebase.google.com/go/v4/messaging"
 
+	"github.com/gobitfly/eth2-beaconchain-explorer/cmd/misc/commands"
 	"github.com/gobitfly/eth2-beaconchain-explorer/db"
 	"github.com/gobitfly/eth2-beaconchain-explorer/exporter"
@@ -27,19 +28,15 @@ import (
 	"github.com/gobitfly/eth2-beaconchain-explorer/utils"
 	"github.com/gobitfly/eth2-beaconchain-explorer/version"
 
+	"github.com/Gurpartap/storekit-go"
 	"github.com/coocood/freecache"
 	"github.com/ethereum/go-ethereum/common"
 	_ "github.com/jackc/pgx/v5/stdlib"
 	"github.com/pkg/errors"
 	utilMath "github.com/protolambda/zrnt/eth2/util/math"
+	"github.com/sirupsen/logrus"
 	go_ens "github.com/wealdtech/go-ens/v3"
 	"golang.org/x/sync/errgroup"
-
-	"flag"
-
-
"github.com/Gurpartap/storekit-go" - - "github.com/sirupsen/logrus" ) var opts = struct { @@ -77,7 +74,7 @@ func main() { statsPartitionCommand := commands.StatsMigratorCommand{} configPath := flag.String("config", "config/default.config.yml", "Path to the config file") - flag.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases, disable-user-per-email, validate-firebase-tokens") + flag.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, re-index-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases, disable-user-per-email, validate-firebase-tokens") flag.Uint64Var(&opts.StartEpoch, "start-epoch", 0, "start epoch") flag.Uint64Var(&opts.EndEpoch, "end-epoch", 0, "end epoch") flag.Uint64Var(&opts.User, "user", 0, "user id") @@ -320,6 +317,8 @@ func main() { exportHistoricPrices(opts.StartDay, opts.EndDay) case "index-missing-blocks": indexMissingBlocks(opts.StartBlock, opts.EndBlock, bt, erigonClient) + case "re-index-blocks": + reIndexBlocks(opts.StartBlock, opts.EndBlock, bt, erigonClient, opts.Transformers, opts.BatchSize, opts.DataConcurrency) case "migrate-last-attestation-slot-bigtable": migrateLastAttestationSlotToBigtable() case "migrate-app-purchases": @@ -440,6 +439,8 @@ func main() { err = disableUserPerEmail() case "fix-epochs": err = fixEpochs() + case "fix-internal-txs-from-node": + fixInternalTxsFromNode(opts.StartBlock, opts.EndBlock, opts.BatchSize, opts.DataConcurrency, bt) case "validate-firebase-tokens": err = validateFirebaseTokens() default: @@ -544,6 +545,52 @@ func disableUserPerEmail() error { return nil } +func fixInternalTxsFromNode(startBlock, endBlock, batchSize, concurrency uint64, bt *db.Bigtable) { + if endBlock > 0 && endBlock < startBlock { + utils.LogError(nil, fmt.Sprintf("endBlock [%v] < startBlock [%v]", endBlock, startBlock), 0) + return + } + + if concurrency == 0 { + utils.LogError(nil, "concurrency must be greater than 0", 0) + return + } + if bt == nil { + utils.LogError(nil, "no bigtable provided", 0) + return + } + + transformers := make([]func(blk *types.Eth1Block, cache *freecache.Cache) (*types.BulkMutations, *types.BulkMutations, error), 0) + transformers = append(transformers, bt.TransformBlock, bt.TransformTx, bt.TransformItx) + + to := endBlock + if endBlock == math.MaxInt64 { + lastBlockFromBlocksTable, err := bt.GetLastBlockInBlocksTable() + if err != nil { + utils.LogError(err, "error retrieving last blocks from blocks table", 0) + return + } + + to = uint64(lastBlockFromBlocksTable) + } + + cache := freecache.NewCache(100 * 1024 * 1024) // 100 MB limit + blockCount := 
utilMath.MaxU64(1, batchSize) + + logrus.Infof("Starting to reindex all txs for blocks ranging from %d to %d", startBlock, to) + for from := startBlock; from <= to; from = from + blockCount { + toBlock := utilMath.MinU64(to, from+blockCount-1) + + logrus.Infof("reindexing txs for blocks from height %v to %v in data table ...", from, toBlock) + err := bt.ReindexITxsFromNode(int64(from), int64(toBlock), int64(batchSize), int64(concurrency), transformers, cache) + if err != nil { + utils.LogError(err, "error indexing from bigtable", 0) + } + cache.Clear() + + } +} + func fixEns(erigonClient *rpc.ErigonClient) error { logrus.WithField("dry", opts.DryRun).Infof("command: fix-ens") addrs := []struct { @@ -1082,7 +1129,7 @@ func debugBlocks() error { } // logrus.WithFields(logrus.Fields{"block": i, "data": fmt.Sprintf("%+v", b)}).Infof("block from bt") - elBlock, _, err := elClient.GetBlock(int64(i), "parity/geth") + elBlock, _, err := elClient.GetBlock(int64(i), "geth") if err != nil { return err } @@ -1550,7 +1597,7 @@ func indexMissingBlocks(start uint64, end uint64, bt *db.Bigtable, client *rpc.E if _, err := db.BigtableClient.GetBlockFromBlocksTable(block); err != nil { logrus.Infof("could not load [%v] from blocks table, will try to fetch it from the node and save it", block) - bc, _, err := client.GetBlock(int64(block), "parity/geth") + bc, _, err := client.GetBlock(int64(block), "geth") if err != nil { utils.LogError(err, fmt.Sprintf("error getting block %v from the node", block), 0) return @@ -1568,21 +1615,115 @@ func indexMissingBlocks(start uint64, end uint64, bt *db.Bigtable, client *rpc.E } } -func indexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, concurrency uint64, transformerFlag string, bt *db.Bigtable, client *rpc.ErigonClient) { - if endBlock > 0 && endBlock < startBlock { - utils.LogError(nil, fmt.Sprintf("endBlock [%v] < startBlock [%v]", endBlock, startBlock), 0) +// Goes through the blocks in the given range from [start] to [end] and re indexes them with the provided transformers +// +// Both [start] and [end] are inclusive +// Pass math.MaxInt64 as [end] to export from [start] to the last block in the blocks table +func reIndexBlocks(start uint64, end uint64, bt *db.Bigtable, client *rpc.ErigonClient, transformerFlag string, batchSize uint64, concurrency uint64) { + if start > 0 && end < start { + utils.LogError(nil, fmt.Sprintf("endBlock [%v] < startBlock [%v]", end, start), 0) return } if concurrency == 0 { utils.LogError(nil, "concurrency must be greater than 0", 0) return } - if bt == nil { - utils.LogError(nil, "no bigtable provided", 0) + if end == math.MaxInt64 { + lastBlockFromBlocksTable, err := bt.GetLastBlockInBlocksTable() + if err != nil { + logrus.Errorf("error retrieving last blocks from blocks table: %v", err) + return + } + end = uint64(lastBlockFromBlocksTable) + } + transformers, importENSChanges, err := getTransformers(transformerFlag, bt) + if err != nil { + utils.LogError(nil, err, 0) return } + if importENSChanges { + if err := bt.ImportEnsUpdates(client.GetNativeClient(), math.MaxInt64); err != nil { + utils.LogError(err, "error importing ens from events", 0) + return + } + } + + readGroup := errgroup.Group{} + readGroup.SetLimit(int(concurrency)) + + writeGroup := errgroup.Group{} + writeGroup.SetLimit(int(concurrency*concurrency) + 1) + + cache := freecache.NewCache(100 * 1024 * 1024) // 100 MB limit + quit := make(chan struct{}) + + sink := make(chan *types.Eth1Block) + writeGroup.Go(func() error { + for { + select { + 
case block, ok := <-sink: + if !ok { + return nil + } + writeGroup.Go(func() error { + if err := bt.SaveBlock(block); err != nil { + return fmt.Errorf("error saving block %v: %w", block.Number, err) + } + err := bt.IndexBlocksWithTransformers([]*types.Eth1Block{block}, transformers, cache) + if err != nil { + return fmt.Errorf("error indexing from bigtable: %w", err) + } + logrus.Infof("%d indexed", block.Number) + return nil + }) + case <-quit: + return nil + } + } + }) + + var errs []error + var mu sync.Mutex + for i := start; i <= end; i = i + batchSize { + height := int64(i) + readGroup.Go(func() error { + heightEnd := height + int64(batchSize) - 1 + if heightEnd > int64(end) { + heightEnd = int64(end) + } + blocks, err := client.GetBlocks(height, heightEnd, "geth") + if err != nil { + mu.Lock() + errs = append(errs, fmt.Errorf("cannot read block range %d-%d: %w", height, heightEnd, err)) + mu.Unlock() + logrus.WithFields(map[string]interface{}{ + "message": err.Error(), + "start": height, + "end": heightEnd, + }).Error("cannot read block range") + return nil + } + for _, block := range blocks { + sink <- block + } + return nil + }) + } + if err := readGroup.Wait(); err != nil { + panic(err) + } + for _, err := range errs { + logrus.Error(err.Error()) + } + quit <- struct{}{} + close(sink) + if err := writeGroup.Wait(); err != nil { + panic(err) + } +} - transforms := make([]func(blk *types.Eth1Block, cache *freecache.Cache) (*types.BulkMutations, *types.BulkMutations, error), 0) +func getTransformers(transformerFlag string, bt *db.Bigtable) ([]db.TransformFunc, bool, error) { + transforms := make([]db.TransformFunc, 0) logrus.Infof("transformerFlag: %v", transformerFlag) transformerList := strings.Split(transformerFlag, ",") @@ -1590,13 +1731,11 @@ func indexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, co transformerList = []string{"TransformBlock", "TransformTx", "TransformBlobTx", "TransformItx", "TransformERC20", "TransformERC721", "TransformERC1155", "TransformWithdrawals", "TransformUncle", "TransformEnsNameRegistered", "TransformContract"} } else if len(transformerList) == 0 { utils.LogError(nil, "no transformer functions provided", 0) - return + return nil, false, fmt.Errorf("no transformer functions provided") } logrus.Infof("transformers: %v", transformerList) + importENSChanges := false - /** - * Add additional transformers you want to sync to this switch case - **/ for _, t := range transformerList { switch t { case "TransformBlock": @@ -1623,10 +1762,31 @@ func indexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, co case "TransformContract": transforms = append(transforms, bt.TransformContract) default: - utils.LogError(nil, "Invalid transformer flag %v", 0) - return + return nil, false, fmt.Errorf("invalid transformer flag %v", t) } } + return transforms, importENSChanges, nil +} + +func indexOldEth1Blocks(startBlock uint64, endBlock uint64, batchSize uint64, concurrency uint64, transformerFlag string, bt *db.Bigtable, client *rpc.ErigonClient) { + if endBlock > 0 && endBlock < startBlock { + utils.LogError(nil, fmt.Sprintf("endBlock [%v] < startBlock [%v]", endBlock, startBlock), 0) + return + } + if concurrency == 0 { + utils.LogError(nil, "concurrency must be greater than 0", 0) + return + } + if bt == nil { + utils.LogError(nil, "no bigtable provided", 0) + return + } + + transforms, importENSChanges, err := getTransformers(transformerFlag, bt) + if err != nil { + utils.LogError(nil, err, 0) + return + } cache := 
freecache.NewCache(100 * 1024 * 1024) // 100 MB limit diff --git a/cmd/store/main.go b/cmd/store/main.go new file mode 100644 index 0000000000..6ab7f98c8a --- /dev/null +++ b/cmd/store/main.go @@ -0,0 +1,38 @@ +package main + +import ( + "errors" + "flag" + "net/http" + + "github.com/sirupsen/logrus" + + "github.com/gobitfly/eth2-beaconchain-explorer/db2" + "github.com/gobitfly/eth2-beaconchain-explorer/db2/store" + "github.com/gobitfly/eth2-beaconchain-explorer/types" + "github.com/gobitfly/eth2-beaconchain-explorer/utils" +) + +func main() { + configPath := flag.String("config", "config/default.config.yml", "Path to the config file") + flag.Parse() + + cfg := &types.Config{} + err := utils.ReadConfig(cfg, *configPath) + if err != nil { + panic(err) + } + + bt, err := store.NewBigTable(cfg.RawBigtable.Bigtable.Project, cfg.RawBigtable.Bigtable.Instance, nil) + if err != nil { + panic(err) + } + remote := store.NewRemoteStore(store.Wrap(bt, db2.BlocksRawTable, "")) + go func() { + logrus.Info("starting remote raw store on port 8087") + if err := http.ListenAndServe("0.0.0.0:8087", remote.Routes()); err != nil && !errors.Is(err, http.ErrServerClosed) { + panic(err) + } + }() + utils.WaitForCtrlC() +} diff --git a/db/bigtable.go b/db/bigtable.go index 7260ddb821..ed07398aec 100644 --- a/db/bigtable.go +++ b/db/bigtable.go @@ -70,7 +70,7 @@ type Bigtable struct { tableMachineMetrics *gcp_bigtable.Table - redisCache *redis.Client + redisCache RedisClient LastAttestationCache map[uint64]uint64 LastAttestationCacheMux *sync.Mutex @@ -82,10 +82,30 @@ type Bigtable struct { machineMetricsQueuedWritesChan chan (types.BulkMutation) } +type RedisClient interface { + SCard(ctx context.Context, key string) *redis.IntCmd + SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.BoolCmd + Pipeline() redis.Pipeliner + Get(ctx context.Context, key string) *redis.StringCmd + Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd +} + func InitBigtable(project, instance, chainId, redisAddress string) (*Bigtable, error) { + rdc := redis.NewClient(&redis.Options{ + Addr: redisAddress, + ReadTimeout: time.Second * 20, + }) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + if err := rdc.Ping(ctx).Err(); err != nil { + return nil, err + } - if utils.Config.Bigtable.Emulator { + return InitBigtableWithCache(ctx, project, instance, chainId, rdc) +} +func InitBigtableWithCache(ctx context.Context, project, instance, chainId string, rdc RedisClient) (*Bigtable, error) { + if utils.Config.Bigtable.Emulator { if utils.Config.Bigtable.EmulatorHost == "" { utils.Config.Bigtable.EmulatorHost = "127.0.0.1" } @@ -96,25 +116,13 @@ func InitBigtable(project, instance, chainId, redisAddress string) (*Bigtable, e logger.Fatalf("unable to set bigtable emulator environment variable: %v", err) } } - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - defer cancel() poolSize := 50 btClient, err := gcp_bigtable.NewClient(ctx, project, instance, option.WithGRPCConnectionPool(poolSize)) - // btClient, err := gcp_bigtable.NewClient(context.Background(), project, instance) if err != nil { return nil, err } - rdc := redis.NewClient(&redis.Options{ - Addr: redisAddress, - ReadTimeout: time.Second * 20, - }) - - if err := rdc.Ping(ctx).Err(); err != nil { - return nil, err - } - bt := &Bigtable{ client: btClient, tableData: btClient.Open("data"), diff --git a/db/bigtable_eth1.go 
b/db/bigtable_eth1.go index a33906d310..3ceaa2c472 100644 --- a/db/bigtable_eth1.go +++ b/db/bigtable_eth1.go @@ -11,6 +11,7 @@ import ( "log" "math/big" "sort" + "strconv" "strings" "sync" "time" @@ -24,8 +25,6 @@ import ( "github.com/gobitfly/eth2-beaconchain-explorer/types" "github.com/gobitfly/eth2-beaconchain-explorer/utils" - "strconv" - gcp_bigtable "cloud.google.com/go/bigtable" "golang.org/x/sync/errgroup" "google.golang.org/protobuf/types/known/timestamppb" @@ -36,7 +35,6 @@ import ( "github.com/ethereum/go-ethereum/common/math" eth_types "github.com/ethereum/go-ethereum/core/types" "github.com/go-redis/redis/v8" - "github.com/sirupsen/logrus" "google.golang.org/protobuf/proto" ) @@ -661,7 +659,7 @@ func TimestampToBigtableTimeDesc(ts time.Time) string { return fmt.Sprintf("%04d%02d%02d%02d%02d%02d", 9999-ts.Year(), 12-ts.Month(), 31-ts.Day(), 23-ts.Hour(), 59-ts.Minute(), 59-ts.Second()) } -func (bigtable *Bigtable) IndexEventsWithTransformers(start, end int64, transforms []func(blk *types.Eth1Block, cache *freecache.Cache) (bulkData *types.BulkMutations, bulkMetadataUpdates *types.BulkMutations, err error), concurrency int64, cache *freecache.Cache) error { +func (bigtable *Bigtable) IndexEventsWithTransformers(start, end int64, transforms []TransformFunc, concurrency int64, cache *freecache.Cache) error { g := new(errgroup.Group) g.SetLimit(int(concurrency)) @@ -768,6 +766,59 @@ func (bigtable *Bigtable) IndexEventsWithTransformers(start, end int64, transfor return nil } +type TransformFunc func(blk *types.Eth1Block, cache *freecache.Cache) (bulkData *types.BulkMutations, bulkMetadataUpdates *types.BulkMutations, err error) + +func (bigtable *Bigtable) blockKeysMutation(blockNumber uint64, blockHash []byte, keys string) (string, *gcp_bigtable.Mutation) { + mut := gcp_bigtable.NewMutation() + mut.Set(METADATA_UPDATES_FAMILY_BLOCKS, "keys", gcp_bigtable.Now(), []byte(keys)) + + key := fmt.Sprintf("%s:BLOCK:%s:%x", bigtable.chainId, reversedPaddedBlockNumber(blockNumber), blockHash) + return key, mut +} + +func (bigtable *Bigtable) IndexBlocksWithTransformers(blocks []*types.Eth1Block, transforms []TransformFunc, cache *freecache.Cache) error { + bulkMutsData := types.BulkMutations{} + bulkMutsMetadataUpdate := types.BulkMutations{} + for _, block := range blocks { + for _, transform := range transforms { + mutsData, mutsMetadataUpdate, err := transform(block, cache) + if err != nil { + logrus.WithError(err).Errorf("error transforming block [%v]", block.Number) + } + bulkMutsData.Keys = append(bulkMutsData.Keys, mutsData.Keys...) + bulkMutsData.Muts = append(bulkMutsData.Muts, mutsData.Muts...) + + if mutsMetadataUpdate != nil { + bulkMutsMetadataUpdate.Keys = append(bulkMutsMetadataUpdate.Keys, mutsMetadataUpdate.Keys...) + bulkMutsMetadataUpdate.Muts = append(bulkMutsMetadataUpdate.Muts, mutsMetadataUpdate.Muts...) 
+ } + + if len(mutsData.Keys) > 0 { + metaKeys := strings.Join(bulkMutsData.Keys, ",") // save block keys in order to be able to handle chain reorgs + key, mut := bigtable.blockKeysMutation(block.Number, block.Hash, metaKeys) + bulkMutsMetadataUpdate.Keys = append(bulkMutsMetadataUpdate.Keys, key) + bulkMutsMetadataUpdate.Muts = append(bulkMutsMetadataUpdate.Muts, mut) + } + } + } + + if len(bulkMutsData.Keys) > 0 { + err := bigtable.WriteBulk(&bulkMutsData, bigtable.tableData, DEFAULT_BATCH_INSERTS) + if err != nil { + return fmt.Errorf("error writing blocks [%v-%v] to bigtable data table: %w", blocks[0].Number, blocks[len(blocks)-1].Number, err) + } + } + + if len(bulkMutsMetadataUpdate.Keys) > 0 { + err := bigtable.WriteBulk(&bulkMutsMetadataUpdate, bigtable.tableMetadataUpdates, DEFAULT_BATCH_INSERTS) + if err != nil { + return fmt.Errorf("error writing blocks [%v-%v] to bigtable metadata updates table: %w", blocks[0].Number, blocks[len(blocks)-1].Number, err) + } + } + + return nil +} + // TransformBlock extracts blocks from bigtable more specifically from the table blocks. // It transforms the block and strips any information that is not necessary for a blocks view // It writes blocks to table data: @@ -1011,11 +1062,22 @@ func (bigtable *Bigtable) TransformTx(blk *types.Eth1Block, cache *freecache.Cac Value: tx.GetValue(), TxFee: fee, GasPrice: tx.GetGasPrice(), + IsContractCreation: isContract, + ErrorMsg: "", BlobTxFee: blobFee, BlobGasPrice: tx.GetBlobGasPrice(), - IsContractCreation: isContract, - ErrorMsg: tx.GetErrorMsg(), + Status: types.StatusType(tx.Status), + } + for _, itx := range tx.Itx { + if itx.ErrorMsg != "" { + indexedTx.ErrorMsg = itx.ErrorMsg + if indexedTx.Status == types.StatusType_SUCCESS { + indexedTx.Status = types.StatusType_PARTIAL + } + break + } } + // Mark Sender and Recipient for balance update bigtable.markBalanceUpdate(indexedTx.From, []byte{0x0}, bulkMetadataUpdates, cache) bigtable.markBalanceUpdate(indexedTx.To, []byte{0x0}, bulkMetadataUpdates, cache) @@ -1107,9 +1169,16 @@ func (bigtable *Bigtable) TransformBlobTx(blk *types.Eth1Block, cache *freecache GasPrice: tx.GetGasPrice(), BlobTxFee: blobFee, BlobGasPrice: tx.GetBlobGasPrice(), - ErrorMsg: tx.GetErrorMsg(), + ErrorMsg: "", BlobVersionedHashes: tx.GetBlobVersionedHashes(), } + for _, itx := range tx.Itx { + if itx.ErrorMsg != "" { + indexedTx.ErrorMsg = itx.ErrorMsg + break + } + } + // Mark Sender and Recipient for balance update bigtable.markBalanceUpdate(indexedTx.From, []byte{0x0}, bulkMetadataUpdates, cache) bigtable.markBalanceUpdate(indexedTx.To, []byte{0x0}, bulkMetadataUpdates, cache) @@ -1218,7 +1287,7 @@ func (bigtable *Bigtable) TransformContract(blk *types.Eth1Block, cache *freecac contractUpdate := &types.IsContractUpdate{ IsContract: itx.GetType() == "create", // also use success status of enclosing transaction, as even successful sub-calls can still be reverted later in the tx - Success: itx.GetErrorMsg() == "" && tx.GetErrorMsg() == "", + Success: itx.GetErrorMsg() == "" && tx.GetStatus() == 1, } b, err := proto.Marshal(contractUpdate) if err != nil { @@ -1286,12 +1355,26 @@ func (bigtable *Bigtable) TransformItx(blk *types.Eth1Block, cache *freecache.Ca } iReversed := reversePaddedIndex(i, TX_PER_BLOCK_LIMIT) + var revertSource string for j, itx := range tx.GetItx() { - if j >= ITX_PER_TX_LIMIT { + if j > ITX_PER_TX_LIMIT { return nil, nil, fmt.Errorf("unexpected number of internal transactions in block expected at most %d but got: %v, tx: %x", ITX_PER_TX_LIMIT, j, 
tx.GetHash()) } jReversed := reversePaddedIndex(j, ITX_PER_TX_LIMIT) + // check for error before skipping, otherwise we loose track of cascading reverts + var reverted bool + if itx.ErrorMsg != "" { + reverted = true + // only save the highest root revert + if revertSource == "" || !strings.HasPrefix(itx.Path, revertSource) { + revertSource = strings.TrimSuffix(itx.Path, "]") + } + } + if revertSource != "" && strings.HasPrefix(itx.Path, revertSource) { + reverted = true + } + if itx.Path == "[]" || bytes.Equal(itx.Value, []byte{0x0}) { // skip top level and empty calls continue } @@ -1305,6 +1388,7 @@ func (bigtable *Bigtable) TransformItx(blk *types.Eth1Block, cache *freecache.Ca From: itx.GetFrom(), To: itx.GetTo(), Value: itx.GetValue(), + Reverted: reverted, } bigtable.markBalanceUpdate(indexedItx.To, []byte{0x0}, bulkMetadataUpdates, cache) @@ -2346,7 +2430,7 @@ func (bigtable *Bigtable) GetAddressTransactionsTableData(address []byte, pageTo } tableData[i] = []interface{}{ - utils.FormatTransactionHash(t.Hash, t.ErrorMsg == ""), + utils.FormatTransactionHashFromStatus(t.Hash, t.Status), utils.FormatMethod(bigtable.GetMethodLabel(t.MethodId, contractInteraction)), utils.FormatBlockNumber(t.BlockNumber), utils.FormatTimestamp(t.Time.AsTime().Unix()), @@ -2825,7 +2909,7 @@ func (bigtable *Bigtable) GetAddressInternalTableData(address []byte, pageToken } tableData[i] = []interface{}{ - utils.FormatTransactionHash(t.ParentHash, true), + utils.FormatTransactionHash(t.ParentHash, !t.Reverted), utils.FormatBlockNumber(t.BlockNumber), utils.FormatTimestamp(t.Time.AsTime().Unix()), utils.FormatAddressWithLimitsInAddressPageTable(address, t.From, BigtableClient.GetAddressLabel(fromName, from_contractInteraction), from_contractInteraction != types.CONTRACT_NONE, digitLimitInAddressPagesTable, nameLimitInAddressPagesTable, true), @@ -2869,7 +2953,20 @@ func (bigtable *Bigtable) GetInternalTransfersForTransaction(transaction []byte, } data := make([]types.ITransaction, 0, len(parityTrace)-1) + var revertSource []int64 for i := 1; i < len(parityTrace); i++ { + var reverted bool + if parityTrace[i].Error != "" { + reverted = true + // only save the highest root revert + if !isSubset(parityTrace[i].TraceAddress, revertSource) { + revertSource = parityTrace[i].TraceAddress + } + } + if isSubset(parityTrace[i].TraceAddress, revertSource) { + reverted = true + } + from, to, value, tx_type := parityTrace[i].ConvertFields() if tx_type == "suicide" { // erigon's "suicide" might be misleading for users @@ -2896,8 +2993,9 @@ func (bigtable *Bigtable) GetInternalTransfersForTransaction(transaction []byte, From: utils.FormatAddress(from, nil, fromName, false, from_contractInteraction != types.CONTRACT_NONE, true), To: utils.FormatAddress(to, nil, toName, false, to_contractInteraction != types.CONTRACT_NONE, true), Amount: utils.FormatElCurrency(value, currency, 8, true, false, false, true), - TracePath: utils.FormatTracePath(tx_type, parityTrace[i].TraceAddress, parityTrace[i].Error == "", bigtable.GetMethodLabel(input, from_contractInteraction)), + TracePath: utils.FormatTracePath(tx_type, parityTrace[i].TraceAddress, !reverted, bigtable.GetMethodLabel(input, from_contractInteraction)), Advanced: tx_type == "delegatecall" || string(value) == "\x00", + Reverted: reverted, } gaslimit, err := strconv.ParseUint(parityTrace[i].Action.Gas, 0, 0) @@ -4864,3 +4962,95 @@ func (bigtable *Bigtable) GetGasNowHistory(ts, pastTs time.Time) ([]types.GasNow } return history, nil } + +func (bigtable *Bigtable) 
ReindexITxsFromNode(start, end, batchSize, concurrency int64, transforms []func(blk *types.Eth1Block, cache *freecache.Cache) (bulkData *types.BulkMutations, bulkMetadataUpdates *types.BulkMutations, err error), cache *freecache.Cache) error {
+	g := new(errgroup.Group)
+	g.SetLimit(int(concurrency))
+
+	if start == 0 && end == 0 {
+		return fmt.Errorf("start or end block height can't be 0")
+	}
+
+	if end < start {
+		return fmt.Errorf("end block must be greater than or equal to start block")
+	}
+
+	logrus.Infof("reindexing txs for blocks from %d to %d", start, end)
+
+	for i := start; i <= end; i += batchSize {
+		firstBlock := i
+		lastBlock := firstBlock + batchSize - 1
+		if lastBlock > end {
+			lastBlock = end
+		}
+
+		blockNumbers := make([]int64, 0, lastBlock-firstBlock+1)
+		for b := firstBlock; b <= lastBlock; b++ {
+			blockNumbers = append(blockNumbers, b)
+		}
+
+		g.Go(func() error {
+			blocks, err := rpc.CurrentErigonClient.GetBlocksByBatch(blockNumbers)
+			if err != nil {
+				return fmt.Errorf("error getting blocks by batch from %v to %v: %v", firstBlock, lastBlock, err)
+			}
+
+			subG := new(errgroup.Group)
+			subG.SetLimit(int(concurrency))
+
+			for _, block := range blocks {
+				currentBlock := block
+				subG.Go(func() error {
+					bulkMutsData := types.BulkMutations{}
+					bulkMutsMetadataUpdate := types.BulkMutations{}
+					for _, transform := range transforms {
+						mutsData, mutsMetadataUpdate, err := transform(currentBlock, cache)
+						if err != nil {
+							logrus.WithError(err).Errorf("error transforming block [%v]", currentBlock.Number)
+						}
+						bulkMutsData.Keys = append(bulkMutsData.Keys, mutsData.Keys...)
+						bulkMutsData.Muts = append(bulkMutsData.Muts, mutsData.Muts...)
+
+						if mutsMetadataUpdate != nil {
+							bulkMutsMetadataUpdate.Keys = append(bulkMutsMetadataUpdate.Keys, mutsMetadataUpdate.Keys...)
+							bulkMutsMetadataUpdate.Muts = append(bulkMutsMetadataUpdate.Muts, mutsMetadataUpdate.Muts...)
+ } + } + + if len(bulkMutsData.Keys) > 0 { + metaKeys := strings.Join(bulkMutsData.Keys, ",") // save block keys in order to be able to handle chain reorgs + err := bigtable.SaveBlockKeys(currentBlock.Number, currentBlock.Hash, metaKeys) + if err != nil { + return fmt.Errorf("error saving block [%v] keys to bigtable metadata updates table: %w", currentBlock.Number, err) + } + + err = bigtable.WriteBulk(&bulkMutsData, bigtable.tableData, DEFAULT_BATCH_INSERTS) + if err != nil { + return fmt.Errorf("error writing block [%v] to bigtable data table: %w", currentBlock.Number, err) + } + } + + if len(bulkMutsMetadataUpdate.Keys) > 0 { + err := bigtable.WriteBulk(&bulkMutsMetadataUpdate, bigtable.tableMetadataUpdates, DEFAULT_BATCH_INSERTS) + if err != nil { + return fmt.Errorf("error writing block [%v] to bigtable metadata updates table: %w", currentBlock.Number, err) + } + } + + return nil + }) + } + return subG.Wait() + }) + + } + + if err := g.Wait(); err == nil { + logrus.Info("data table indexing completed") + } else { + utils.LogError(err, "wait group error", 0) + return err + } + + return nil +} diff --git a/db/bigtable_init.go b/db/bigtable_init.go index 48f055f251..5aad325710 100644 --- a/db/bigtable_init.go +++ b/db/bigtable_init.go @@ -12,6 +12,8 @@ import ( gcp_bigtable "cloud.google.com/go/bigtable" ) +var ErrTableAlreadyExist = fmt.Errorf("aborting bigtable schema init as tables are already present") + func InitBigtableSchema() error { tables := make(map[string]map[string]gcp_bigtable.GCPolicy) @@ -75,7 +77,7 @@ func InitBigtableSchema() error { } if len(existingTables) > 0 { - return fmt.Errorf("aborting bigtable schema init as tables are already present") + return ErrTableAlreadyExist } for name, definition := range tables { diff --git a/db/db.go b/db/db.go index 3ab5c84586..6b3d873203 100644 --- a/db/db.go +++ b/db/db.go @@ -2,6 +2,7 @@ package db import ( "bytes" + "context" "database/sql" "embed" "encoding/hex" @@ -38,9 +39,19 @@ var EmbedMigrations embed.FS var DBPGX *pgxpool.Conn +type SQLReaderDb interface { + Close() error + Get(dest interface{}, query string, args ...interface{}) error + Select(dest interface{}, query string, args ...interface{}) error + SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error + Query(query string, args ...any) (*sql.Rows, error) + Preparex(query string) (*sqlx.Stmt, error) + Rebind(query string) string +} + // DB is a pointer to the explorer-database var WriterDb *sqlx.DB -var ReaderDb *sqlx.DB +var ReaderDb SQLReaderDb var ClickhouseReaderDb *sqlx.DB diff --git a/db/db_test.go b/db/db_test.go new file mode 100644 index 0000000000..60d3107415 --- /dev/null +++ b/db/db_test.go @@ -0,0 +1,178 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "html/template" + "os" + "strings" + "testing" + "time" + + "github.com/coocood/freecache" + "github.com/ethereum/go-ethereum/common" + "github.com/go-redis/redis/v8" + "github.com/jmoiron/sqlx" + + "github.com/gobitfly/eth2-beaconchain-explorer/rpc" + "github.com/gobitfly/eth2-beaconchain-explorer/types" + "github.com/gobitfly/eth2-beaconchain-explorer/utils" +) + +func TestTxRevertTransformer(t *testing.T) { + node, exists := os.LookupEnv("ERIGON_NODE") + if !exists { + t.Skip() + } + + erigon, err := rpc.NewErigonClient(node) + if err != nil { + t.Fatal(err) + } + rpc.CurrentErigonClient = erigon + + utils.Config = &types.Config{ + Chain: types.Chain{ + ClConfig: types.ClChainConfig{ + DepositChainID: 1, + }, + }, + Bigtable: types.Bigtable{ + 
Project: "test", + Instance: "instanceTest", + Emulator: true, + EmulatorPort: 8086, + EmulatorHost: "127.0.0.1", + }, + Frontend: types.Frontend{ + ElCurrencyDivisor: 1e18, + }, + } + ReaderDb = noSQLReaderDb{} + + bt, err := InitBigtableWithCache(context.Background(), "test", "instanceTest", "1", noRedis{}) + if err != nil { + t.Fatal(err) + } + if err := InitBigtableSchema(); err != nil { + if !errors.Is(err, ErrTableAlreadyExist) { + t.Fatal(err) + } + } + + tests := []struct { + name string + block int64 + txHash string + addresses []string + expected []string + }{ + { + name: "partial", + block: 20183291, + txHash: "0xf7d385f000250c073dfef9a36327c5d30a4c77a0c50588ce3eded29f6829a4cd", + addresses: []string{"0x96abc34501e9fc274f6d4e39cbb4004c0f6e519f"}, + expected: []string{"Transaction partially executed"}, + }, + { + name: "failed", + block: 20929404, + txHash: "0xcce69bddc2b427ecf2f02120b74cde9f5d95f36849f4617fbb31527982daf88c", + addresses: []string{"0x0d92bC7b13a474937c7C94F882339D68048Af186"}, + expected: []string{"Transaction failed"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + block, _, err := erigon.GetBlock(tt.block, "geth") + if err != nil { + t.Fatal(err) + } + if err := bt.SaveBlock(block); err != nil { + t.Fatal(err) + } + transformers := []TransformFunc{ + bt.TransformItx, + bt.TransformTx, + } + cache := freecache.NewCache(1 * 1024 * 1024) // 1 MB limit + + if err := bt.IndexEventsWithTransformers(tt.block, tt.block, transformers, 1, cache); err != nil { + t.Fatal(err) + } + + for i, address := range tt.addresses { + res, err := bt.GetAddressTransactionsTableData(common.FromHex(address), "") + if err != nil { + t.Fatal(err) + } + if got, want := string((res.Data[0][0]).(template.HTML)), tt.expected[i]; !strings.Contains(got, want) { + t.Errorf("'%s' should contains '%s'", got, want) + } + } + }) + } +} + +type noRedis struct { +} + +func (n noRedis) SCard(ctx context.Context, key string) *redis.IntCmd { + return redis.NewIntCmd(ctx) +} + +func (n noRedis) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.BoolCmd { + return redis.NewBoolCmd(ctx) +} + +func (n noRedis) Pipeline() redis.Pipeliner { + //TODO implement me + panic("implement me") +} + +func (n noRedis) Get(ctx context.Context, key string) *redis.StringCmd { + cmd := redis.NewStringCmd(ctx) + cmd.SetErr(redis.Nil) + return cmd +} + +func (n noRedis) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd { + return redis.NewStatusCmd(ctx) +} + +type noSQLReaderDb struct { +} + +func (n noSQLReaderDb) Close() error { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) Get(dest interface{}, query string, args ...interface{}) error { + return nil +} + +func (n noSQLReaderDb) Select(dest interface{}, query string, args ...interface{}) error { + return nil +} + +func (n noSQLReaderDb) Query(query string, args ...any) (*sql.Rows, error) { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) Preparex(query string) (*sqlx.Stmt, error) { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) Rebind(query string) string { + //TODO implement me + panic("implement me") +} diff --git a/db/ens.go b/db/ens.go index 943fab3e29..ec2440236b 100644 --- a/db/ens.go +++ b/db/ens.go 
@@ -81,6 +81,8 @@ func (bigtable *Bigtable) TransformEnsNameRegistered(blk *types.Eth1Block, cache metrics.TaskDuration.WithLabelValues("bt_transform_ens").Observe(time.Since(startTime).Seconds()) }() + bulkData = &types.BulkMutations{} + bulkMetadataUpdates = &types.BulkMutations{} var ensCrontractAddresses map[string]string switch bigtable.chainId { case "1": @@ -90,11 +92,9 @@ func (bigtable *Bigtable) TransformEnsNameRegistered(blk *types.Eth1Block, cache case "11155111": ensCrontractAddresses = ensContracts.ENSCrontractAddressesSepolia default: - return nil, nil, nil + return bulkData, bulkMetadataUpdates, nil } - bulkData = &types.BulkMutations{} - bulkMetadataUpdates = &types.BulkMutations{} keys := make(map[string]bool) ethLog := eth_types.Log{} diff --git a/db/statistics.go b/db/statistics.go index 27d27ee88a..dea0eb9a11 100644 --- a/db/statistics.go +++ b/db/statistics.go @@ -1637,7 +1637,7 @@ func WriteExecutionChartSeriesForDay(day int64) error { totalBlobCount = totalBlobCount.Add(decimal.NewFromInt(int64(len(tx.BlobVersionedHashes)))) default: - logger.Fatalf("error unknown tx type %v hash: %x", tx.Status, tx.Hash) + logger.Fatalf("error unknown tx type %v hash: %x", tx.Type, tx.Hash) } totalTxFees = totalTxFees.Add(txFees) @@ -1646,7 +1646,7 @@ func WriteExecutionChartSeriesForDay(day int64) error { failedTxCount += 1 totalFailedGasUsed = totalFailedGasUsed.Add(gasUsed) totalFailedTxFee = totalFailedTxFee.Add(txFees) - case 1: + case 1, 2: successTxCount += 1 default: logger.Fatalf("error unknown status code %v hash: %x", tx.Status, tx.Hash) diff --git a/db/utils.go b/db/utils.go new file mode 100644 index 0000000000..165ee09bcf --- /dev/null +++ b/db/utils.go @@ -0,0 +1,16 @@ +package db + +func isSubset[E comparable](big []E, short []E) bool { + if len(short) == 0 { + return false + } + if len(big) < len(short) { + return false + } + for i := 0; i < len(short); i++ { + if big[i] != short[i] { + return false + } + } + return true +} diff --git a/db2/cache.go b/db2/cache.go new file mode 100644 index 0000000000..c086b4eb51 --- /dev/null +++ b/db2/cache.go @@ -0,0 +1,119 @@ +package db2 + +import ( + "encoding/json" + "sync" + "time" +) + +const ( + oneBlockTTL = 1 * time.Second + blocksTTL = 30 * time.Second // default ttl, if read it will be deleted sooner +) + +type MinimalBlock struct { + Result struct { + Hash string `json:"hash"` + } `json:"result"` +} + +type CachedRawStore struct { + db RawStoreReader + // sync.Map with manual delete have better perf than freecache because we can handle this way a ttl < 1s + cache sync.Map + + locks map[string]*sync.RWMutex + mapLock sync.Mutex // to make the map safe concurrently +} + +func WithCache(reader RawStoreReader) *CachedRawStore { + return &CachedRawStore{ + db: reader, + locks: make(map[string]*sync.RWMutex), + } +} + +func (c *CachedRawStore) lockBy(key string) func() { + c.mapLock.Lock() + defer c.mapLock.Unlock() + + lock, found := c.locks[key] + if !found { + lock = &sync.RWMutex{} + c.locks[key] = lock + lock.Lock() + return lock.Unlock + } + lock.RLock() + return lock.RUnlock +} + +func (c *CachedRawStore) ReadBlockByNumber(chainID uint64, number int64) (*FullBlockRawData, error) { + key := blockKey(chainID, number) + + unlock := c.lockBy(key) + defer unlock() + + v, ok := c.cache.Load(key) + if ok { + // once read ensure to delete it from the cache + go c.unCacheBlockAfter(key, "", oneBlockTTL) + return v.(*FullBlockRawData), nil + } + // TODO make warning not found in cache + block, err := 
c.db.ReadBlockByNumber(chainID, number) + if block != nil { + c.cacheBlock(block, oneBlockTTL) + } + return block, err +} + +func (c *CachedRawStore) cacheBlock(block *FullBlockRawData, ttl time.Duration) { + key := blockKey(block.ChainID, block.BlockNumber) + c.cache.Store(key, block) + + var mini MinimalBlock + if len(block.Uncles) != 0 { + // retrieve the block hash for caching but only if the block has uncle(s) + _ = json.Unmarshal(block.Block, &mini) + c.cache.Store(mini.Result.Hash, block.BlockNumber) + } + + go c.unCacheBlockAfter(key, mini.Result.Hash, ttl) +} + +func (c *CachedRawStore) unCacheBlockAfter(key, hash string, ttl time.Duration) { + time.Sleep(ttl) + c.cache.Delete(key) + c.mapLock.Lock() + if hash != "" { + c.cache.Delete(hash) + } + defer c.mapLock.Unlock() + delete(c.locks, key) +} + +func (c *CachedRawStore) ReadBlockByHash(chainID uint64, hash string) (*FullBlockRawData, error) { + v, ok := c.cache.Load(hash) + if !ok { + return c.db.ReadBlockByHash(chainID, hash) + } + + v, ok = c.cache.Load(blockKey(chainID, v.(int64))) + if !ok { + return c.db.ReadBlockByHash(chainID, hash) + } + + return v.(*FullBlockRawData), nil +} + +func (c *CachedRawStore) ReadBlocksByNumber(chainID uint64, start, end int64) ([]*FullBlockRawData, error) { + blocks, err := c.db.ReadBlocksByNumber(chainID, start, end) + if err != nil { + return nil, err + } + for _, block := range blocks { + c.cacheBlock(block, blocksTTL) + } + return blocks, nil +} diff --git a/db2/client.go b/db2/client.go new file mode 100644 index 0000000000..48d2263f43 --- /dev/null +++ b/db2/client.go @@ -0,0 +1,257 @@ +package db2 + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "syscall" + + "github.com/ethereum/go-ethereum/common/hexutil" + + "github.com/gobitfly/eth2-beaconchain-explorer/db2/store" +) + +var ErrNotFoundInCache = fmt.Errorf("cannot find hash in cache") +var ErrMethodNotSupported = fmt.Errorf("method not supported") + +type RawStoreReader interface { + ReadBlockByNumber(chainID uint64, number int64) (*FullBlockRawData, error) + ReadBlockByHash(chainID uint64, hash string) (*FullBlockRawData, error) + ReadBlocksByNumber(chainID uint64, start, end int64) ([]*FullBlockRawData, error) +} + +type WithFallback struct { + roundTripper http.RoundTripper + fallback http.RoundTripper +} + +func NewWithFallback(roundTripper, fallback http.RoundTripper) *WithFallback { + return &WithFallback{ + roundTripper: roundTripper, + fallback: fallback, + } +} + +func (r WithFallback) RoundTrip(request *http.Request) (*http.Response, error) { + resp, err := r.roundTripper.RoundTrip(request) + if err == nil { + // no fallback needed + return resp, nil + } + + var e1 *json.SyntaxError + if !errors.As(err, &e1) && + !errors.Is(err, ErrNotFoundInCache) && + !errors.Is(err, ErrMethodNotSupported) && + !errors.Is(err, store.ErrNotFound) && + !errors.Is(err, syscall.ECONNRESET) { + return nil, err + } + + return r.fallback.RoundTrip(request) +} + +type BigTableEthRaw struct { + db RawStoreReader + chainID uint64 +} + +func NewBigTableEthRaw(db RawStoreReader, chainID uint64) *BigTableEthRaw { + return &BigTableEthRaw{ + db: db, + chainID: chainID, + } +} + +func (r *BigTableEthRaw) RoundTrip(request *http.Request) (*http.Response, error) { + body, err := io.ReadAll(request.Body) + if err != nil { + return nil, err + } + defer func() { + request.Body = io.NopCloser(bytes.NewBuffer(body)) + }() + var messages []*jsonrpcMessage + var isSingle bool + if err := 
json.NewDecoder(bytes.NewReader(body)).Decode(&messages); err != nil { + isSingle = true + message := new(jsonrpcMessage) + if err := json.NewDecoder(bytes.NewReader(body)).Decode(message); err != nil { + return nil, err + } + messages = append(messages, message) + } + var resps []*jsonrpcMessage + for _, message := range messages { + resp, err := r.handle(request.Context(), message) + if err != nil { + return nil, err + } + resps = append(resps, resp) + } + + respBody, _ := makeBody(isSingle, resps) + return &http.Response{ + Body: respBody, + StatusCode: http.StatusOK, + }, nil +} + +func (r *BigTableEthRaw) handle(ctx context.Context, message *jsonrpcMessage) (*jsonrpcMessage, error) { + var args []interface{} + err := json.Unmarshal(message.Params, &args) + if err != nil { + return nil, err + } + + var respBody []byte + switch message.Method { + case "eth_getBlockByNumber": + // we decode only big.Int maybe we should also handle "latest" + block, err := hexutil.DecodeBig(args[0].(string)) + if err != nil { + return nil, err + } + + respBody, err = r.BlockByNumber(ctx, block) + if err != nil { + return nil, err + } + + case "debug_traceBlockByNumber": + block, err := hexutil.DecodeBig(args[0].(string)) + if err != nil { + return nil, err + } + + respBody, err = r.TraceBlockByNumber(ctx, block) + if err != nil { + return nil, err + } + + case "eth_getBlockReceipts": + block, err := hexutil.DecodeBig(args[0].(string)) + if err != nil { + return nil, err + } + + respBody, err = r.BlockReceipts(ctx, block) + if err != nil { + return nil, err + } + + case "eth_getUncleByBlockHashAndIndex": + index, err := hexutil.DecodeBig(args[1].(string)) + if err != nil { + return nil, err + } + respBody, err = r.UncleByBlockHashAndIndex(ctx, args[0].(string), index.Int64()) + if err != nil { + return nil, err + } + default: + return nil, ErrMethodNotSupported + } + var resp jsonrpcMessage + _ = json.Unmarshal(respBody, &resp) + if len(respBody) == 0 { + resp.Version = message.Version + resp.Result = []byte("[]") + } + resp.ID = message.ID + return &resp, nil +} + +func makeBody(isSingle bool, messages []*jsonrpcMessage) (io.ReadCloser, error) { + var b []byte + var err error + if isSingle { + b, err = json.Marshal(messages[0]) + } else { + b, err = json.Marshal(messages) + } + if err != nil { + return nil, err + } + return io.NopCloser(bytes.NewReader(b)), nil +} + +func (r *BigTableEthRaw) BlockByNumber(ctx context.Context, number *big.Int) ([]byte, error) { + block, err := r.db.ReadBlockByNumber(r.chainID, number.Int64()) + if err != nil { + return nil, err + } + return block.Block, nil +} + +func (r *BigTableEthRaw) BlockReceipts(ctx context.Context, number *big.Int) ([]byte, error) { + block, err := r.db.ReadBlockByNumber(r.chainID, number.Int64()) + if err != nil { + return nil, err + } + return block.Receipts, nil +} + +func (r *BigTableEthRaw) TraceBlockByNumber(ctx context.Context, number *big.Int) ([]byte, error) { + block, err := r.db.ReadBlockByNumber(r.chainID, number.Int64()) + if err != nil { + return nil, err + } + return block.Traces, nil +} + +func (r *BigTableEthRaw) UncleByBlockNumberAndIndex(ctx context.Context, number *big.Int, index int64) ([]byte, error) { + block, err := r.db.ReadBlockByNumber(r.chainID, number.Int64()) + if err != nil { + return nil, err + } + + var uncles []*jsonrpcMessage + if err := json.Unmarshal(block.Uncles, &uncles); err != nil { + var uncle *jsonrpcMessage + if err := json.Unmarshal(block.Uncles, &uncle); err != nil { + return nil, fmt.Errorf("cannot 
unmarshal uncle: %w", err) + } + return json.Marshal(uncle) + } + return json.Marshal(uncles[index]) +} + +func (r *BigTableEthRaw) UncleByBlockHashAndIndex(ctx context.Context, hash string, index int64) ([]byte, error) { + block, err := r.db.ReadBlockByHash(r.chainID, hash) + if err != nil { + return nil, err + } + + var uncles []*jsonrpcMessage + if err := json.Unmarshal(block.Uncles, &uncles); err != nil { + var uncle *jsonrpcMessage + if err := json.Unmarshal(block.Uncles, &uncle); err != nil { + return nil, fmt.Errorf("cannot unmarshal uncle: %w", err) + } + return json.Marshal(uncle) + } + return json.Marshal(uncles[index]) +} + +// A value of this type can a JSON-RPC request, notification, successful response or +// error response. Which one it is depends on the fields. +type jsonrpcMessage struct { + Version string `json:"jsonrpc,omitempty"` + ID json.RawMessage `json:"id,omitempty"` + Method string `json:"method,omitempty"` + Params json.RawMessage `json:"params,omitempty"` + Error *jsonError `json:"error,omitempty"` + Result json.RawMessage `json:"result,omitempty"` +} + +type jsonError struct { + Code int `json:"code"` + Message string `json:"message"` + Data interface{} `json:"data,omitempty"` +} diff --git a/db2/client_test.go b/db2/client_test.go new file mode 100644 index 0000000000..f6d10b36a1 --- /dev/null +++ b/db2/client_test.go @@ -0,0 +1,318 @@ +package db2 + +import ( + "context" + "math/big" + "net/http" + "os" + "testing" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/gobitfly/eth2-beaconchain-explorer/db2/store" + "github.com/gobitfly/eth2-beaconchain-explorer/db2/storetest" +) + +const ( + chainID uint64 = 1 +) + +func TestBigTableClientRealCondition(t *testing.T) { + project := os.Getenv("BIGTABLE_PROJECT") + instance := os.Getenv("BIGTABLE_INSTANCE") + if project == "" || instance == "" { + t.Skip("skipping test, set BIGTABLE_PROJECT and BIGTABLE_INSTANCE") + } + + tests := []struct { + name string + block int64 + chainID uint64 + }{ + { + name: "test block", + block: 6008149, + chainID: 1, + }, + { + name: "test block 2", + block: 141, + chainID: 1, + }, + { + name: "test missmatch tx and receipts", + block: 37938291, + chainID: 100, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + bg, err := store.NewBigTable(project, instance, nil) + if err != nil { + t.Fatal(err) + } + + rawStore := NewRawStore(store.Wrap(bg, BlocksRawTable, "")) + rpcClient, err := rpc.DialOptions(context.Background(), "http://foo.bar", rpc.WithHTTPClient(&http.Client{ + Transport: NewBigTableEthRaw(rawStore, tt.chainID), + })) + if err != nil { + t.Fatal(err) + } + ethClient := ethclient.NewClient(rpcClient) + + block, err := ethClient.BlockByNumber(context.Background(), big.NewInt(tt.block)) + if err != nil { + t.Fatalf("BlockByNumber() error = %v", err) + } + if got, want := block.Number().Int64(), tt.block; got != want { + t.Errorf("got %v, want %v", got, want) + } + + receipts, err := ethClient.BlockReceipts(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(tt.block))) + if err != nil { + t.Fatalf("BlockReceipts() error = %v", err) + } + if len(block.Transactions()) != 0 && len(receipts) == 0 { + t.Errorf("receipts should not be empty") + } + if len(block.Transactions()) != len(receipts) { + t.Errorf("missmatch between receipt and block length") + } + + var traces 
[]GethTraceCallResultWrapper + if err := rpcClient.Call(&traces, "debug_traceBlockByNumber", hexutil.EncodeBig(block.Number()), gethTracerArg); err != nil { + t.Fatalf("debug_traceBlockByNumber() error = %v", err) + } + if len(block.Transactions()) != 0 && len(traces) == 0 { + t.Errorf("traces should not be empty") + } + }) + } +} + +func benchmarkBlockRetrieval(b *testing.B, ethClient *ethclient.Client, rpcClient *rpc.Client) { + b.ResetTimer() + for j := 0; j < b.N; j++ { + blockTestNumber := int64(20978000 + b.N) + _, err := ethClient.BlockByNumber(context.Background(), big.NewInt(blockTestNumber)) + if err != nil { + b.Fatalf("BlockByNumber() error = %v", err) + } + + if _, err := ethClient.BlockReceipts(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(blockTestNumber))); err != nil { + b.Fatalf("BlockReceipts() error = %v", err) + } + + var traces []GethTraceCallResultWrapper + if err := rpcClient.Call(&traces, "debug_traceBlockByNumber", hexutil.EncodeBig(big.NewInt(blockTestNumber)), gethTracerArg); err != nil { + b.Fatalf("debug_traceBlockByNumber() error = %v", err) + } + } +} + +func BenchmarkErigonNode(b *testing.B) { + node := os.Getenv("ETH1_ERIGON_ENDPOINT") + if node == "" { + b.Skip("skipping test, please set ETH1_ERIGON_ENDPOINT") + } + + rpcClient, err := rpc.DialOptions(context.Background(), node) + if err != nil { + b.Fatal(err) + } + + benchmarkBlockRetrieval(b, ethclient.NewClient(rpcClient), rpcClient) +} + +func BenchmarkRawBigTable(b *testing.B) { + project := os.Getenv("BIGTABLE_PROJECT") + instance := os.Getenv("BIGTABLE_INSTANCE") + if project == "" || instance == "" { + b.Skip("skipping test, set BIGTABLE_PROJECT and BIGTABLE_INSTANCE") + } + + bt, err := store.NewBigTable(project, instance, nil) + if err != nil { + b.Fatal(err) + } + + rawStore := WithCache(NewRawStore(store.Wrap(bt, BlocksRawTable, ""))) + rpcClient, err := rpc.DialOptions(context.Background(), "http://foo.bar", rpc.WithHTTPClient(&http.Client{ + Transport: NewBigTableEthRaw(rawStore, chainID), + })) + if err != nil { + b.Fatal(err) + } + + benchmarkBlockRetrieval(b, ethclient.NewClient(rpcClient), rpcClient) +} + +func BenchmarkAll(b *testing.B) { + b.Run("BenchmarkErigonNode", func(b *testing.B) { + BenchmarkErigonNode(b) + }) + b.Run("BenchmarkRawBigTable", func(b *testing.B) { + BenchmarkRawBigTable(b) + }) +} + +func TestBigTableClient(t *testing.T) { + tests := []struct { + name string + block FullBlockRawData + }{ + { + name: "test block", + block: testFullBlock, + }, + { + name: "two uncles", + block: testTwoUnclesFullBlock, + }, + } + + client, admin := storetest.NewBigTable(t) + bg, err := store.NewBigTableWithClient(context.Background(), client, admin, raw) + if err != nil { + t.Fatal(err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rawStore := NewRawStore(store.Wrap(bg, BlocksRawTable, "")) + if err := rawStore.AddBlocks([]FullBlockRawData{tt.block}); err != nil { + t.Fatal(err) + } + + rpcClient, err := rpc.DialOptions(context.Background(), "http://foo.bar", rpc.WithHTTPClient(&http.Client{ + Transport: NewBigTableEthRaw(WithCache(rawStore), tt.block.ChainID), + })) + if err != nil { + t.Fatal(err) + } + ethClient := ethclient.NewClient(rpcClient) + + block, err := ethClient.BlockByNumber(context.Background(), big.NewInt(tt.block.BlockNumber)) + if err != nil { + t.Fatalf("BlockByNumber() error = %v", err) + } + if got, want := block.Number().Int64(), tt.block.BlockNumber; got != want { + t.Errorf("got %v, want %v", got, want) + 
} + + receipts, err := ethClient.BlockReceipts(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(tt.block.BlockNumber))) + if err != nil { + t.Fatalf("BlockReceipts() error = %v", err) + } + if len(block.Transactions()) != 0 && len(receipts) == 0 { + t.Errorf("receipts should not be empty") + } + + var traces []GethTraceCallResultWrapper + if err := rpcClient.Call(&traces, "debug_traceBlockByNumber", hexutil.EncodeBig(block.Number()), gethTracerArg); err != nil { + t.Fatalf("debug_traceBlockByNumber() error = %v", err) + } + if len(block.Transactions()) != 0 && len(traces) == 0 { + t.Errorf("traces should not be empty") + } + }) + } +} + +func TestBigTableClientWithFallback(t *testing.T) { + node := os.Getenv("ETH1_ERIGON_ENDPOINT") + if node == "" { + t.Skip("skipping test, set ETH1_ERIGON_ENDPOINT") + } + + tests := []struct { + name string + block FullBlockRawData + }{ + { + name: "test block", + block: testFullBlock, + }, + } + + client, admin := storetest.NewBigTable(t) + bg, err := store.NewBigTableWithClient(context.Background(), client, admin, raw) + if err != nil { + t.Fatal(err) + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rawStore := NewRawStore(store.Wrap(bg, BlocksRawTable, "")) + + rpcClient, err := rpc.DialOptions(context.Background(), node, rpc.WithHTTPClient(&http.Client{ + Transport: NewWithFallback(NewBigTableEthRaw(rawStore, tt.block.ChainID), http.DefaultTransport), + })) + if err != nil { + t.Fatal(err) + } + ethClient := ethclient.NewClient(rpcClient) + + balance, err := ethClient.BalanceAt(context.Background(), common.Address{}, big.NewInt(tt.block.BlockNumber)) + if err != nil { + t.Fatal(err) + } + if balance == nil { + t.Errorf("empty balance") + } + + block, err := ethClient.BlockByNumber(context.Background(), big.NewInt(tt.block.BlockNumber)) + if err != nil { + t.Fatalf("BlockByNumber() error = %v", err) + } + if got, want := block.Number().Int64(), tt.block.BlockNumber; got != want { + t.Errorf("got %v, want %v", got, want) + } + + receipts, err := ethClient.BlockReceipts(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.BlockNumber(tt.block.BlockNumber))) + if err != nil { + t.Fatalf("BlockReceipts() error = %v", err) + } + if len(block.Transactions()) != 0 && len(receipts) == 0 { + t.Errorf("receipts should not be empty") + } + + var traces []GethTraceCallResultWrapper + if err := rpcClient.Call(&traces, "debug_traceBlockByNumber", hexutil.EncodeBig(block.Number()), gethTracerArg); err != nil { + t.Fatalf("debug_traceBlockByNumber() error = %v", err) + } + if len(block.Transactions()) != 0 && len(traces) == 0 { + t.Errorf("traces should not be empty") + } + }) + } +} + +// TODO import those 3 from somewhere +var gethTracerArg = map[string]string{ + "tracer": "callTracer", +} + +type GethTraceCallResultWrapper struct { + Result *GethTraceCallResult `json:"result,omitempty"` +} + +type GethTraceCallResult struct { + TransactionPosition int `json:"transaction_position,omitempty"` + Time string `json:"time,omitempty"` + GasUsed string `json:"gas_used,omitempty"` + From common.Address `json:"from,omitempty"` + To common.Address `json:"to,omitempty"` + Value string `json:"value,omitempty"` + Gas string `json:"gas,omitempty"` + Input string `json:"input,omitempty"` + Output string `json:"output,omitempty"` + Error string `json:"error,omitempty"` + Type string `json:"type,omitempty"` + Calls []*GethTraceCallResult `json:"calls,omitempty"` +} diff --git a/db2/compress.go b/db2/compress.go new file mode 100644 
index 0000000000..1c92672f22 --- /dev/null +++ b/db2/compress.go @@ -0,0 +1,48 @@ +package db2 + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" +) + +type gzipCompressor struct { +} + +func (gzipCompressor) compress(src []byte) ([]byte, error) { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + if _, err := zw.Write(src); err != nil { + return nil, fmt.Errorf("gzip cannot compress data: %w", err) + } + if err := zw.Close(); err != nil { + return nil, fmt.Errorf("gzip cannot close writer: %w", err) + } + return buf.Bytes(), nil +} + +func (gzipCompressor) decompress(src []byte) ([]byte, error) { + if len(src) == 0 { + return nil, nil + } + zr, err := gzip.NewReader(bytes.NewReader(src)) + if err != nil { + return nil, fmt.Errorf("gzip cannot create reader: %w", err) + } + data, err := io.ReadAll(zr) + if err != nil { + return nil, fmt.Errorf("gzip cannot read: %w", err) + } + return data, nil +} + +type noOpCompressor struct{} + +func (n noOpCompressor) compress(src []byte) ([]byte, error) { + return src, nil +} + +func (n noOpCompressor) decompress(src []byte) ([]byte, error) { + return src, nil +} diff --git a/db2/hexutil.go b/db2/hexutil.go new file mode 100644 index 0000000000..bfaa2d51e5 --- /dev/null +++ b/db2/hexutil.go @@ -0,0 +1,56 @@ +// Copyright 2016 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package db2 + +import ( + "encoding/hex" + "encoding/json" + "fmt" + "strings" +) + +// Bytes marshals/unmarshals as a JSON string with 0x prefix. +// The empty slice marshals as "0x". +type Bytes []byte + +// UnmarshalJSON implements json.Unmarshaler. 
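+// Odd-length hex strings are accepted as well: the value is left-padded with a
+// single zero nibble before decoding, e.g. "0xf" decodes to 0x0f and "0x1b2" to 0x01b2.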
+func (b *Bytes) UnmarshalJSON(input []byte) error { + var v string + if err := json.Unmarshal(input, &v); err != nil { + return err + } + + v = strings.Replace(v, "0x", "", 1) + + // make sure to have an even length hex string by prefixing odd strings with a single 0, 0x0 will become 0x00 for example + // while hashes and addresses have always an even length, numbers usually don't + if len(v)%2 != 0 { + v = "0" + v + } + + var err error + *b, err = hex.DecodeString(v) + + if err != nil { + return fmt.Errorf("error decoding %s: %v", string(input), err) + } + return err +} + +func (b *Bytes) String() string { + return fmt.Sprintf("0x%x", *b) +} diff --git a/db2/raw.go b/db2/raw.go new file mode 100644 index 0000000000..7e6d3c1ab3 --- /dev/null +++ b/db2/raw.go @@ -0,0 +1,173 @@ +package db2 + +import ( + "fmt" + "log/slog" + "math/big" + "strings" + + "github.com/gobitfly/eth2-beaconchain-explorer/db2/store" +) + +type compressor interface { + compress(src []byte) ([]byte, error) + decompress(src []byte) ([]byte, error) +} + +type RawStore struct { + store store.Store + compressor compressor +} + +func NewRawStore(store store.Store) RawStore { + return RawStore{ + store: store, + compressor: gzipCompressor{}, + } +} + +func (db RawStore) AddBlocks(blocks []FullBlockRawData) error { + itemsByKey := make(map[string][]store.Item) + for _, fullBlock := range blocks { + if len(fullBlock.Block) == 0 || len(fullBlock.BlockTxs) != 0 && len(fullBlock.Traces) == 0 { + return fmt.Errorf("block %d: empty data", fullBlock.BlockNumber) + } + key := blockKey(fullBlock.ChainID, fullBlock.BlockNumber) + + block, err := db.compressor.compress(fullBlock.Block) + if err != nil { + return fmt.Errorf("cannot compress block %d: %w", fullBlock.BlockNumber, err) + } + receipts, err := db.compressor.compress(fullBlock.Receipts) + if err != nil { + return fmt.Errorf("cannot compress receipts %d: %w", fullBlock.BlockNumber, err) + } + traces, err := db.compressor.compress(fullBlock.Traces) + if err != nil { + return fmt.Errorf("cannot compress traces %d: %w", fullBlock.BlockNumber, err) + } + itemsByKey[key] = []store.Item{ + { + Family: BT_COLUMNFAMILY_BLOCK, + Column: BT_COLUMN_BLOCK, + Data: block, + }, + { + Family: BT_COLUMNFAMILY_RECEIPTS, + Column: BT_COLUMN_RECEIPTS, + Data: receipts, + }, + { + Family: BT_COLUMNFAMILY_TRACES, + Column: BT_COLUMN_TRACES, + Data: traces, + }, + } + if len(fullBlock.Receipts) == 0 { + // todo move that log higher up + slog.Warn(fmt.Sprintf("empty receipts at block %d lRec %d lTxs %d", fullBlock.BlockNumber, len(fullBlock.Receipts), len(fullBlock.BlockTxs))) + } + if fullBlock.BlockUnclesCount > 0 { + uncles, err := db.compressor.compress(fullBlock.Uncles) + if err != nil { + return fmt.Errorf("cannot compress block %d: %w", fullBlock.BlockNumber, err) + } + itemsByKey[key] = append(itemsByKey[key], store.Item{ + Family: BT_COLUMNFAMILY_UNCLES, + Column: BT_COLUMN_UNCLES, + Data: uncles, + }) + } + } + return db.store.BulkAdd(itemsByKey) +} + +func (db RawStore) ReadBlockByNumber(chainID uint64, number int64) (*FullBlockRawData, error) { + return db.readBlock(chainID, number) +} + +func (db RawStore) ReadBlockByHash(chainID uint64, hash string) (*FullBlockRawData, error) { + // todo use sql db to retrieve hash + return nil, fmt.Errorf("ReadBlockByHash not implemented") +} + +func (db RawStore) readBlock(chainID uint64, number int64) (*FullBlockRawData, error) { + key := blockKey(chainID, number) + data, err := db.store.GetRow(key) + if err != nil { + return nil, err + } + return 
db.parseRow(chainID, number, data) +} + +func (db RawStore) parseRow(chainID uint64, number int64, data map[string][]byte) (*FullBlockRawData, error) { + block, err := db.compressor.decompress(data[fmt.Sprintf("%s:%s", BT_COLUMNFAMILY_BLOCK, BT_COLUMN_BLOCK)]) + if err != nil { + return nil, fmt.Errorf("cannot decompress block %d: %w", number, err) + } + receipts, err := db.compressor.decompress(data[fmt.Sprintf("%s:%s", BT_COLUMNFAMILY_RECEIPTS, BT_COLUMN_RECEIPTS)]) + if err != nil { + return nil, fmt.Errorf("cannot decompress receipts %d: %w", number, err) + } + traces, err := db.compressor.decompress(data[fmt.Sprintf("%s:%s", BT_COLUMNFAMILY_TRACES, BT_COLUMN_TRACES)]) + if err != nil { + return nil, fmt.Errorf("cannot decompress traces %d: %w", number, err) + } + uncles, err := db.compressor.decompress(data[fmt.Sprintf("%s:%s", BT_COLUMNFAMILY_UNCLES, BT_COLUMN_UNCLES)]) + if err != nil { + return nil, fmt.Errorf("cannot decompress uncles %d: %w", number, err) + } + return &FullBlockRawData{ + ChainID: chainID, + BlockNumber: number, + BlockHash: nil, + BlockUnclesCount: 0, + BlockTxs: nil, + Block: block, + Receipts: receipts, + Traces: traces, + Uncles: uncles, + }, nil +} + +func (db RawStore) ReadBlocksByNumber(chainID uint64, start, end int64) ([]*FullBlockRawData, error) { + rows, err := db.store.GetRowsRange(blockKey(chainID, start), blockKey(chainID, end)) + if err != nil { + return nil, err + } + blocks := make([]*FullBlockRawData, 0, end-start+1) + for key, data := range rows { + number := blockKeyToNumber(chainID, key) + block, err := db.parseRow(chainID, number, data) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + return blocks, nil +} + +func blockKey(chainID uint64, number int64) string { + return fmt.Sprintf("%d:%12d", chainID, MAX_EL_BLOCK_NUMBER-number) +} + +func blockKeyToNumber(chainID uint64, key string) int64 { + key = strings.TrimPrefix(key, fmt.Sprintf("%d:", chainID)) + reversed, _ := new(big.Int).SetString(key, 10) + + return MAX_EL_BLOCK_NUMBER - reversed.Int64() +} + +type FullBlockRawData struct { + ChainID uint64 + + BlockNumber int64 + BlockHash Bytes + BlockUnclesCount int + BlockTxs []string + + Block Bytes + Receipts Bytes + Traces Bytes + Uncles Bytes +} diff --git a/db2/raw_test.go b/db2/raw_test.go new file mode 100644 index 0000000000..011ef64dfb --- /dev/null +++ b/db2/raw_test.go @@ -0,0 +1,438 @@ +package db2 + +import ( + "context" + "testing" + + "github.com/gobitfly/eth2-beaconchain-explorer/db2/store" + "github.com/gobitfly/eth2-beaconchain-explorer/db2/storetest" +) + +func TestRaw(t *testing.T) { + client, admin := storetest.NewBigTable(t) + + s, err := store.NewBigTableWithClient(context.Background(), client, admin, raw) + if err != nil { + t.Fatal(err) + } + + db := RawStore{ + store: store.Wrap(s, BlocksRawTable, ""), + compressor: noOpCompressor{}, + } + + block := FullBlockRawData{ + ChainID: 1, + BlockNumber: testBlockNumber, + BlockHash: nil, + BlockUnclesCount: 1, + BlockTxs: nil, + Block: []byte(testBlock), + Receipts: []byte(testReceipts), + Traces: []byte(testTraces), + Uncles: []byte(testUncles), + } + + if err := db.AddBlocks([]FullBlockRawData{block}); err != nil { + t.Fatal(err) + } + + res, err := db.ReadBlockByNumber(block.ChainID, block.BlockNumber) + if err != nil { + t.Fatal(err) + } + + if got, want := string(res.Block), testBlock; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := string(res.Receipts), testReceipts; got != want { + t.Errorf("got %v, want %v", 
got, want) + } + if got, want := string(res.Traces), testTraces; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := string(res.Uncles), testUncles; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +var testFullBlock = FullBlockRawData{ + ChainID: 1, + BlockNumber: testBlockNumber, + BlockUnclesCount: 1, + Block: []byte(testBlock), + Receipts: []byte(testReceipts), + Traces: []byte(testTraces), + Uncles: []byte(testUncles), +} + +var testTwoUnclesFullBlock = FullBlockRawData{ + ChainID: 1, + BlockNumber: testTwoUnclesBlockNumber, + BlockUnclesCount: 2, + Block: []byte(testTwoUnclesBlock), + Receipts: nil, + Traces: nil, + Uncles: []byte(testTwoUnclesBlockUncles), +} + +const ( + testBlockNumber = 6008149 + testBlock = `{ + "id":1, + "jsonrpc":"2.0", + "result":{ + "difficulty":"0xbfabcdbd93dda", + "extraData":"0x737061726b706f6f6c2d636e2d6e6f64652d3132", + "gasLimit":"0x79f39e", + "gasUsed":"0x79ccd3", + "hash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "logsBloom":"0x4848112002a2020aaa0812180045840210020005281600c80104264300080008000491220144461026015300100000128005018401002090a824a4150015410020140400d808440106689b29d0280b1005200007480ca950b15b010908814e01911000054202a020b05880b914642a0000300003010044044082075290283516be82504082003008c4d8d14462a8800c2990c88002a030140180036c220205201860402001014040180002006860810ec0a1100a14144148408118608200060461821802c081000042d0810104a8004510020211c088200420822a082040e10104c00d010064004c122692020c408a1aa2348020445403814002c800888208b1", + "miner":"0x5a0b54d5dc17e0aadc383d2db43b0a0d3e029c4c", + "mixHash":"0x3d1fdd16f15aeab72e7db1013b9f034ee33641d92f71c0736beab4e67d34c7a7", + "nonce":"0x4db7a1c01d8a8072", + "number":"0x5bad55", + "parentHash":"0x61a8ad530a8a43e3583f8ec163f773ad370329b2375d66433eb82f005e1d6202", + "receiptsRoot":"0x5eced534b3d84d3d732ddbc714f5fd51d98a941b28182b6efe6df3a0fe90004b", + "sha3Uncles":"0x8a562e7634774d3e3a36698ac4915e37fc84a2cd0044cb84fa5d80263d2af4f6", + "size":"0x41c7", + "stateRoot":"0xf5208fffa2ba5a3f3a2f64ebd5ca3d098978bedd75f335f56b705d8715ee2305", + "timestamp":"0x5b541449", + "totalDifficulty":"0x12ac11391a2f3872fcd", + "transactions":[ + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "chainId":"0x1", + "from":"0xfbb1b73c4f0bda4f67dca266ce6ef42f520fbb98", + "gas":"0x249f0", + "gasPrice":"0x174876e800", + "hash":"0x8784d99762bccd03b2086eabccee0d77f14d05463281e121a62abfebcf0d2d5f", + "input":"0x6ea056a9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000bd8d7fa6f8cc00", + "nonce":"0x5e4724", + "r":"0xd1556332df97e3bd911068651cfad6f975a30381f4ff3a55df7ab3512c78b9ec", + "s":"0x66b51cbb10cd1b2a09aaff137d9f6d4255bf73cb7702b666ebd5af502ffa4410", + "to":"0x4b9c25ca0224aef6a7522cabdbc3b2e125b7ca50", + "transactionIndex":"0x0", + "type":"0x0", + "v":"0x25", + "value":"0x0" + }, + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "chainId":"0x1", + "from":"0xc837f51a0efa33f8eca03570e3d01a4b2cf97ffd", + "gas":"0x15f90", + "gasPrice":"0x14b8d03a00", + "hash":"0x311be6a9b58748717ac0f70eb801d29973661aaf1365960d159e4ec4f4aa2d7f", + "input":"0x", + "nonce":"0x4241", + "r":"0xe9ef2f6fcff76e45fac6c2e8080094370082cfb47e8fde0709312f9aa3ec06ad", + "s":"0x421ebc4ebe187c173f13b1479986dcbff5c4997c0dfeb1fd149a982ad4bcdfe7", + "to":"0xf49bd0367d830850456d2259da366a054038dc46", + 
"transactionIndex":"0x1", + "type":"0x0", + "v":"0x25", + "value":"0x1bafa9ee16e78000" + }, + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "chainId":"0x1", + "from":"0x532a2bae845abe7e5115808b832d34f9c3d41eed", + "gas":"0x910c", + "gasPrice":"0xe6f7cec00", + "hash":"0xe42b0256058b7cad8a14b136a0364acda0b4c36f5b02dea7e69bfd82cef252a2", + "input":"0xa9059cbb000000000000000000000000398a58b2e3790431fdac1ea56017e65401fa998800000000000000000000000000000000000000000007bcadb57b861109080000", + "nonce":"0x0", + "r":"0x4e3fdc1ad7ac52439791a8a48bc8ed70040170fa9c4b6cef6317f63d45e9a142", + "s":"0x6e5feaefdbc8f99c5d036b31d6386fb49c1a97812f13d48742a1b77b7e690858", + "to":"0x818fc6c2ec5986bc6e2cbf00939d90556ab12ce5", + "transactionIndex":"0x2", + "type":"0x0", + "v":"0x26", + "value":"0x0" + }, + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "from":"0x2a9847093ad514639e8cdec960b5e51686960291", + "gas":"0x4f588", + "gasPrice":"0xc22a75840", + "hash":"0x4eb05376055c6456ed883fc843bc43df1dcf739c321ba431d518aecd7f98ca11", + "input":"0x000101fa27134d5320", + "nonce":"0xd50", + "r":"0x980e463d70e67c49477883a55cdb42829c9e5746e95d63b738d7390c7d685551", + "s":"0x647babbe3a96df447da960812c88833c6b5aa009f1c361c6adae818100d15007", + "to":"0xc7ed8919c70dd8ccf1a57c0ed75b25ceb2dd22d1", + "transactionIndex":"0x3", + "type":"0x0", + "v":"0x1b", + "value":"0x0" + }, + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "chainId":"0x1", + "from":"0xe12c32af0ca83fe12c58b1daef82ebe6333f7b10", + "gas":"0x5208", + "gasPrice":"0xba43b7400", + "hash":"0x994dd9e72b212b7dc5fd0466ab75adf7d391cf4f206a65b7ad2a1fd032bb06d7", + "input":"0x", + "nonce":"0x1d", + "r":"0xedf9e958bbd3f7d2fd9831678a3166cf0373a4436a63c152c4aa84f864bb7e6e", + "s":"0xacbc0cfcc7d3264de55c0af45b6c280ea237e28d273f7ddba0eea05204c2101", + "to":"0x5343222c6f7e2af4d9d3e844fb8f3f18f7be0e55", + "transactionIndex":"0x4", + "type":"0x0", + "v":"0x26", + "value":"0x31f64d59c01f6000" + }, + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "chainId":"0x1", + "from":"0x80c779504c3a3a39dbd0356f5d8e851cb6dbba0a", + "gas":"0x57e40", + "gasPrice":"0x9c35a3cc8", + "hash":"0xf6feecbb9ab0ac58591a4bc287059b1133089c499517e91a274e6a1f5e7dce53", + "input":"0x010b01000d0670", + "nonce":"0xb9bf", + "r":"0x349d0601e24f0128ecfce3665edd2a0727a043fa62ccf587fded784aed46c3f6", + "s":"0x77127ddf76cb2b9e12074006a504fbd6893d5bd29a18a8efb193907f4565404", + "to":"0x3714e5671be406fc1920351984f4429237831477", + "transactionIndex":"0x5", + "type":"0x0", + "v":"0x26", + "value":"0x0" + }, + { + "blockHash":"0xb3b20624f8f0f86eb50dd04688409e5cea4bd02d700bf6e79e9384d47d6a5a35", + "blockNumber":"0x5bad55", + "chainId":"0x1", + "from":"0xc5b373618d4d01a38f822f56ca6d2ff5080cc4f2", + "gas":"0x4f588", + "gasPrice":"0x9c355a8e8", + "hash":"0x7e537d687a5525259480440c6ea2e1a8469cd98906eaff8597f3d2a44422ff97", + "input":"0x0108e9000d0670136b", + "nonce":"0x109f3", + "r":"0x7736e11b03c6702eb6aaea8c45ed6b8a510878bb7741028d82938b9207448e9b", + "s":"0x70bcd4c0ec2b0c67eb9eefb53a6ff7e114b45888589a5aaf4c1a1f00fa704775", + "to":"0xc5f60fa4613493931b605b6da1e9febbdeb61e16", + "transactionIndex":"0x6", + "type":"0x0", + "v":"0x25", + "value":"0x0" + } + ], + "transactionsRoot":"0xf98631e290e88f58a46b7032f025969039aa9b5696498efc76baf436fa69b262", + "uncles":[ + 
"0x824cce7c7c2ec6874b9fa9a9a898eb5f27cbaf3991dfa81084c3af60d1db618c" + ] + } +}` + testTraces = `{ + "jsonrpc": "2.0", + "id": 1, + "result": [ + { + "result": { + "from": "0xa5ba45f484bc67fe293cf01f7d92d5ba3514dd42", + "gas": "0x5208", + "gasUsed": "0x5208", + "input": "0x", + "to": "0x45a318273749d6eb00f5f6ca3bc7cd3de26d642a", + "type": "CALL", + "value": "0x2ca186f5fda8004" + } + }, + { + "result": { + "from": "0x25f2650cc9e8ad863bf5da6a7598e24271574e29", + "gas": "0xfe0e", + "gasUsed": "0xafee", + "input": "0xd0e30db0", + "to": "0xe5d7c2a44ffddf6b295a15c148167daaaf5cf34f", + "type": "CALL", + "value": "0x2386f26fc10000" + } + } + ] +}` + testReceipts = `{ + "jsonrpc": "2.0", + "id": 1, + "result": [ + { + "blockHash": "0x19514ce955c65e4dd2cd41f435a75a46a08535b8fc16bc660f8092b32590b182", + "blockNumber": "0x6f55", + "contractAddress": null, + "cumulativeGasUsed": "0x18c36", + "from": "0x22896bfc68814bfd855b1a167255ee497006e730", + "gasUsed": "0x18c36", + "effectiveGasPrice": "0x9502f907", + "logs": [ + { + "address": "0xfd584430cafa2f451b4e2ebcf3986a21fff04350", + "topics": [ + "0x2f8788117e7eff1d82e926ec794901d17c78024a50270940304540a733656f0d", + "0x4be29e0e4eb91f98f709d98803cba271592782e293b84a625e025cbb40197ba8", + "0x000000000000000000000000835281a2563db4ebf1b626172e085dc406bfc7d2", + "0x00000000000000000000000022896bfc68814bfd855b1a167255ee497006e730" + ], + "data": "0x", + "blockNumber": "0x6f55", + "transactionHash": "0x4a481e4649da999d92db0585c36cba94c18a33747e95dc235330e6c737c6f975", + "transactionIndex": "0x0", + "blockHash": "0x19514ce955c65e4dd2cd41f435a75a46a08535b8fc16bc660f8092b32590b182", + "logIndex": "0x0", + "removed": false + } + ], + "logsBloom": "0x00000004000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000080020000000000000200010000000000000000000001000000800000000000000000000000000000000000000000000000000000100100000000000000000000008000000000000000000000000000000002000000000000000000000", + "status": "0x1", + "to": "0xfd584430cafa2f451b4e2ebcf3986a21fff04350", + "transactionHash": "0x4a481e4649da999d92db0585c36cba94c18a33747e95dc235330e6c737c6f975", + "transactionIndex": "0x0", + "type": "0x0" + }, + { + "blockHash": "0x19514ce955c65e4dd2cd41f435a75a46a08535b8fc16bc660f8092b32590b182", + "blockNumber": "0x6f55", + "contractAddress": null, + "cumulativeGasUsed": "0x1de3e", + "from": "0x712e3a792c974b3e3dbe41229ad4290791c75a82", + "gasUsed": "0x5208", + "effectiveGasPrice": "0x9502f907", + "logs": [], + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "status": "0x1", + "to": "0xd42e2b1c14d02f1df5369a9827cb8e6f3f75f338", + "transactionHash": "0xefb83b4e3f1c317e8da0f8e2fbb2fe964f34ee184466032aeecac79f20eacaf6", + "transactionIndex": "0x1", + "type": "0x2" + } + ] +}` + testUncles = `[{ + "jsonrpc": "2.0", + "id": 1, + "result": 
{ + "difficulty": "0x57f117f5c", + "extraData": "0x476574682f76312e302e302f77696e646f77732f676f312e342e32", + "gasLimit": "0x1388", + "gasUsed": "0x0", + "hash": "0x932bdf904546a2287a2c9b2ede37925f698a7657484b172d4e5184f80bdd464d", + "logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner": "0x5bf5e9cf9b456d6591073513de7fd69a9bef04bc", + "mixHash": "0x4500aa4ee2b3044a155252e35273770edeb2ab6f8cb19ca8e732771484462169", + "nonce": "0x24732773618192ac", + "number": "0x299", + "parentHash": "0xa779859b1ee558258b7008bbabff272280136c5dd3eb3ea3bfa8f6ae03bf91e5", + "receiptsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size": "0x21d", + "stateRoot": "0x2604fbf5183f5360da249b51f1b9f1e0f315d2ff3ffa1a4143ff221ad9ca1fec", + "timestamp": "0x55ba4827", + "totalDifficulty": null, + "transactionsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles": [] + } +}]` + + testTwoUnclesBlockNumber = 141 + testTwoUnclesBlock = `{ + "jsonrpc":"2.0", + "id":0, + "result":{ + "difficulty":"0x4417decf7", + "extraData":"0x426974636f696e2069732054484520426c6f636b636861696e2e", + "gasLimit":"0x1388", + "gasUsed":"0x0", + "hash":"0xeafbe76fdcadc1b69ba248589eb2a674b60b00c84374c149c9deaf5596183932", + "logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner":"0x1b7047b4338acf65be94c1a3e8c5c9338ad7d67c", + "mixHash":"0x21eabda67c3151855389a5a968e50daa7b356b3046e2f119ef46c97d204a541e", + "nonce":"0x85378a3fc5e608e1", + "number":"0x8d", + "parentHash":"0xe2c1e8200ef2e9fba09979f0b504dc52c068719623c7064904c7bd3e9365acc1", + "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles":"0x393f5f01182846b91386f8b00759fd54f83998a6a1064b8ac72fc8eca1bcf81b", + "size":"0x653", + "stateRoot":"0x3e1eea9a01178945535230b6f5839201f594d9be20618bb4edaa383f4f0c850f", + "timestamp":"0x55ba4444", + "totalDifficulty":"0x24826e73469", + "transactions":[ + + ], + "transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles":[ + "0x61beeeb3e11e89d19fed2e988c8017b55c3ddb8895f531072363ce2abaf56b95", + "0xf84d9d74415364c3a7569f315ff831b910968c7dd637fffaab51278c9e7f9306" + ] + } +}` + testTwoUnclesBlockUncles = `[ + { + "jsonrpc":"2.0", + "id":141, + "result":{ + "difficulty":"0x4406dc086", + "extraData":"0x476574682f4c5649562f76312e302e302f6c696e75782f676f312e342e32", + "gasLimit":"0x1388", + "gasUsed":"0x0", + 
"hash":"0x61beeeb3e11e89d19fed2e988c8017b55c3ddb8895f531072363ce2abaf56b95", + "logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner":"0xbb7b8287f3f0a933474a79eae42cbca977791171", + "mixHash":"0x87547a998fe63f18b36180ca918131b6b20fc5d67390e2ac2f66be3fee8fb7d2", + "nonce":"0x1dc5b79704350bee", + "number":"0x8b", + "parentHash":"0x2253b8f79c23b6ff67cb2ef6fabd9ec59e1edf2d07c16d98a19378041f96624d", + "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size":"0x21f", + "stateRoot":"0x940131b162b07452ea31b5335c4dedfdddc13338142f71f261d51dea664033b4", + "timestamp":"0x55ba4441", + "totalDifficulty":"0x24826e73469", + "transactions":[ + + ], + "transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles":[ + + ] + } + }, + { + "jsonrpc":"2.0", + "id":141, + "result":{ + "difficulty":"0x4406dc086", + "extraData":"0x476574682f6b6c6f737572652f76312e302e302d66633739643332642f6c696e", + "gasLimit":"0x1388", + "gasUsed":"0x0", + "hash":"0xf84d9d74415364c3a7569f315ff831b910968c7dd637fffaab51278c9e7f9306", + "logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "miner":"0xd7e30ae310c1d1800f5b641baa7af95b2e1fd98c", + "mixHash":"0x6039f236ebb70ec71091df5770aef0f0faa13ef334c4c68daaffbfdf7961a3d3", + "nonce":"0x7d8ec05d330e6e99", + "number":"0x8b", + "parentHash":"0x2253b8f79c23b6ff67cb2ef6fabd9ec59e1edf2d07c16d98a19378041f96624d", + "receiptsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "sha3Uncles":"0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", + "size":"0x221", + "stateRoot":"0x302bb7708752013f46f009dec61cad586c35dc185d20cdde0071b7487f7c2008", + "timestamp":"0x55ba4440", + "totalDifficulty":"0x24826e73469", + "transactions":[ + + ], + "transactionsRoot":"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421", + "uncles":[ + + ] + } + } +]` +) diff --git a/db2/store/bigtable.go b/db2/store/bigtable.go new file mode 100644 index 0000000000..6e55c31b0d --- /dev/null +++ b/db2/store/bigtable.go @@ -0,0 +1,351 @@ +package store + +import ( + "context" + "fmt" + "slices" + "strings" + "time" + + "cloud.google.com/go/bigtable" + "golang.org/x/exp/maps" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var ErrNotFound = fmt.Errorf("not found") + +const ( + timeout = time.Minute // Timeout duration for Bigtable operations + maxRetries = 5 +) + +type TableWrapper struct { + *BigTableStore + table string + family string +} + 
+func Wrap(db *BigTableStore, table string, family string) TableWrapper { + return TableWrapper{ + BigTableStore: db, + table: table, + family: family, + } +} + +func (w TableWrapper) Add(key, column string, data []byte, allowDuplicate bool) error { + return w.BigTableStore.Add(w.table, w.family, key, column, data, allowDuplicate) +} + +func (w TableWrapper) Read(prefix string) ([][]byte, error) { + return w.BigTableStore.Read(w.table, w.family, prefix) +} + +func (w TableWrapper) GetLatestValue(key string) ([]byte, error) { + return w.BigTableStore.GetLatestValue(w.table, w.family, key) +} + +func (w TableWrapper) GetRow(key string) (map[string][]byte, error) { + return w.BigTableStore.GetRow(w.table, key) +} + +func (w TableWrapper) GetRowKeys(prefix string) ([]string, error) { + return w.BigTableStore.GetRowKeys(w.table, prefix) +} + +func (w TableWrapper) BulkAdd(itemsByKey map[string][]Item) error { + return w.BigTableStore.BulkAdd(w.table, itemsByKey) +} + +func (w TableWrapper) GetRowsRange(high, low string) (map[string]map[string][]byte, error) { + return w.BigTableStore.GetRowsRange(w.table, high, low) +} + +// BigTableStore is a wrapper around Google Cloud Bigtable for storing and retrieving data +type BigTableStore struct { + client *bigtable.Client + admin *bigtable.AdminClient + + maxRetries int +} + +func NewBigTableWithClient(ctx context.Context, client *bigtable.Client, adminClient *bigtable.AdminClient, tablesAndFamilies map[string][]string) (*BigTableStore, error) { + // Initialize the Bigtable table and column family + if err := initTable(ctx, adminClient, tablesAndFamilies); err != nil { + return nil, err + } + + return &BigTableStore{client: client, admin: adminClient, maxRetries: maxRetries}, nil +} + +// NewBigTable initializes a new BigTableStore +// It returns a BigTableStore and an error if any part of the setup fails +func NewBigTable(project, instance string, tablesAndFamilies map[string][]string) (*BigTableStore, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Create an admin client to manage Bigtable tables + adminClient, err := bigtable.NewAdminClient(ctx, project, instance) + if err != nil { + return nil, fmt.Errorf("could not create admin client: %v", err) + } + + // Create a Bigtable client for performing data operations + client, err := bigtable.NewClient(ctx, project, instance) + if err != nil { + return nil, fmt.Errorf("could not create data operations client: %v", err) + } + + return NewBigTableWithClient(ctx, client, adminClient, tablesAndFamilies) +} + +// initTable creates the tables and column family in the Bigtable +func initTable(ctx context.Context, adminClient *bigtable.AdminClient, tablesAndFamilies map[string][]string) error { + for table, families := range tablesAndFamilies { + if err := createTableAndFamilies(ctx, adminClient, table, families...); err != nil { + return err + } + } + return nil +} + +func createTableAndFamilies(ctx context.Context, admin *bigtable.AdminClient, tableName string, familyNames ...string) error { + // Get the list of existing tables + tables, err := admin.Tables(ctx) + if err != nil { + return fmt.Errorf("could not fetch table list: %v", err) + } + + // Create the table if it doesn't exist + if !slices.Contains(tables, tableName) { + if err := admin.CreateTable(ctx, tableName); err != nil { + return fmt.Errorf("could not create table %s: %v", tableName, err) + } + } + + // Retrieve information about the table + tblInfo, err := admin.TableInfo(ctx, tableName) + 
if err != nil { + return fmt.Errorf("could not read info for table %s: %v", tableName, err) + } + + for _, familyName := range familyNames { + // Create the column family if it doesn't exist + if !slices.Contains(tblInfo.Families, familyName) { + if err := admin.CreateColumnFamily(ctx, tableName, familyName); err != nil { + return fmt.Errorf("could not create column family %s: %v", familyName, err) + } + } + } + return nil +} + +type Item struct { + Family string + Column string + Data []byte +} + +func (b BigTableStore) BulkAdd(table string, itemsByKey map[string][]Item) error { + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var muts []*bigtable.Mutation + for _, items := range itemsByKey { + mut := bigtable.NewMutation() + for _, item := range items { + mut.Set(item.Family, item.Column, bigtable.Timestamp(0), item.Data) + } + muts = append(muts, mut) + } + errs, err := tbl.ApplyBulk(ctx, maps.Keys(itemsByKey), muts) + if err != nil { + return fmt.Errorf("cannot ApplyBulk err: %w", err) + } + // TODO aggregate errs + for _, e := range errs { + return fmt.Errorf("cannot ApplyBulk elem err: %w", e) + } + return nil +} + +// Add inserts a new row with the given key, column, and data into the Bigtable +// It applies a mutation that stores data in the receiver column family +// It returns error if the operation fails +func (b BigTableStore) Add(table, family string, key string, column string, data []byte, allowDuplicate bool) error { + // Open the transfer table for data operations + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + // Create a new mutation to store data in the given column + mut := bigtable.NewMutation() + mut.Set(family, column, bigtable.Now(), data) + + if !allowDuplicate { + mut = bigtable.NewCondMutation(bigtable.RowKeyFilter(key), nil, mut) + } + // Apply the mutation to the table using the given key + if err := tbl.Apply(ctx, key, mut); err != nil { + return fmt.Errorf("could not apply row mutation: %v", err) + } + return nil +} + +// Read retrieves all rows from the Bigtable's receiver column family +// It returns the data in the form of a 2D byte slice and an error if the operation fails +func (b BigTableStore) Read(table, family, prefix string) ([][]byte, error) { + // Open the transfer table for reading + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var data [][]byte + // Read all rows from the table and collect values from the receiver column family + err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool { + for _, item := range row[family] { + // Append each value from the receiver family to the data slice + data = append(data, item.Value) + } + return true + }) + if err != nil { + return nil, fmt.Errorf("could not read rows: %v", err) + } + + return data, nil +} + +func (b BigTableStore) GetLatestValue(table, family, key string) ([]byte, error) { + // Open the transfer table for reading + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var data []byte + err := tbl.ReadRows(ctx, bigtable.PrefixRange(key), func(row bigtable.Row) bool { + data = row[family][0].Value + return true + }) + + if err != nil { + return nil, fmt.Errorf("could not read rows: %v", err) + } + + return data, nil +} + +func (b BigTableStore) GetRow(table, key string) (map[string][]byte, 
error) { + // Open the transfer table for reading + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + data := make(map[string][]byte) + err := tbl.ReadRows(ctx, bigtable.PrefixRange(key), func(row bigtable.Row) bool { + for _, family := range row { + for _, item := range family { + data[item.Column] = item.Value + } + } + return true + }) + + if err != nil { + return nil, fmt.Errorf("could not read rows: %v", err) + } + if len(data) == 0 { + return nil, ErrNotFound + } + + return data, nil +} + +func (b BigTableStore) GetRowsRange(table, high, low string) (map[string]map[string][]byte, error) { + var err error + var data map[string]map[string][]byte + for i := 0; i < b.maxRetries; i++ { + data, err = b.getRowsRange(table, high, low) + if err == nil { + if len(data) == 0 { + return nil, ErrNotFound + } + return data, nil + } + // return directly if error is not grpc Internal + if !strings.Contains(err.Error(), codes.Internal.String()) { + return nil, fmt.Errorf("could not read rows: %v", err) + } + } + return nil, fmt.Errorf("could not get rows after %d tries: %v", b.maxRetries, err) +} + +func (b BigTableStore) getRowsRange(table, high, low string) (map[string]map[string][]byte, error) { + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + rowRange := bigtable.NewClosedRange(low, high) + data := make(map[string]map[string][]byte) + err := tbl.ReadRows(ctx, rowRange, func(row bigtable.Row) bool { + data[row.Key()] = make(map[string][]byte) + for _, family := range row { + for _, item := range family { + data[row.Key()][item.Column] = item.Value + } + } + return true + }) + + return data, err +} + +func (b BigTableStore) GetRowKeys(table, prefix string) ([]string, error) { + tbl := b.client.Open(table) + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + var data []string + err := tbl.ReadRows(ctx, bigtable.PrefixRange(prefix), func(row bigtable.Row) bool { + data = append(data, row.Key()) + return true + }) + + if err != nil { + return nil, fmt.Errorf("could not read rows: %v", err) + } + + return data, nil +} + +func (b BigTableStore) Clear() error { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + tables, err := b.admin.Tables(ctx) + if err != nil { + return err + } + for _, table := range tables { + if err := b.admin.DropAllRows(ctx, table); err != nil { + return fmt.Errorf("could not drop all rows: %v", err) + } + } + return nil +} + +// Close shuts down the BigTableStore by closing the Bigtable client connection +// It returns an error if the operation fails +func (b BigTableStore) Close() error { + if err := b.client.Close(); err != nil && status.Code(err) != codes.Canceled { + return fmt.Errorf("could not close client: %v", err) + } + if b.admin != nil { + if err := b.admin.Close(); err != nil && status.Code(err) != codes.Canceled { + return fmt.Errorf("could not close admin client: %v", err) + } + } + return nil +} diff --git a/db2/store/bigtable_test.go b/db2/store/bigtable_test.go new file mode 100644 index 0000000000..55fdc2047f --- /dev/null +++ b/db2/store/bigtable_test.go @@ -0,0 +1,192 @@ +package store + +import ( + "context" + "slices" + "strings" + "testing" + + "github.com/gobitfly/eth2-beaconchain-explorer/db2/storetest" +) + +func TestBigTableStore(t *testing.T) { + type item struct { + key string + column string + data string + } + tests := []struct { + name 
string + bulk bool + items []item + expected []string + }{ + { + name: "simple add", + items: []item{{ + key: "foo", + column: "bar", + data: "foobar", + }}, + expected: []string{"foobar"}, + }, + { + name: "bulk add", + bulk: true, + items: []item{{ + key: "key1", + column: "col1", + data: "foobar", + }, { + key: "key2", + column: "col2", + data: "foobar", + }, { + key: "key3", + column: "col3", + data: "foobar", + }}, + expected: []string{"foobar", "foobar", "foobar"}, + }, + { + name: "dont duplicate", + items: []item{{ + key: "foo", + column: "bar", + data: "foobar", + }, { + key: "foo", + column: "bar", + data: "foobar", + }}, + expected: []string{"foobar"}, + }, + { + name: "with a prefix", + items: []item{{ + key: "foo", + }, { + key: "foofoo", + }, { + key: "foofoofoo", + }, { + key: "bar", + }}, + expected: []string{"", "", "", ""}, + }, + } + tables := map[string][]string{"testTable": {"testFamily"}} + client, admin := storetest.NewBigTable(t) + store, err := NewBigTableWithClient(context.Background(), client, admin, tables) + if err != nil { + t.Fatal(err) + } + db := Wrap(store, "testTable", "testFamily") + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + defer func() { + _ = db.Clear() + }() + + if tt.bulk { + itemsByKey := make(map[string][]Item) + for _, item := range tt.items { + itemsByKey[item.key] = append(itemsByKey[item.key], Item{ + Family: "testFamily", + Column: item.column, + Data: []byte(item.data), + }) + } + if err := db.BulkAdd(itemsByKey); err != nil { + t.Error(err) + } + } else { + for _, it := range tt.items { + if err := db.Add(it.key, it.column, []byte(it.data), false); err != nil { + t.Error(err) + } + } + } + + t.Run("Read", func(t *testing.T) { + res, err := db.Read("") + if err != nil { + t.Error(err) + } + if got, want := len(res), len(tt.expected); got != want { + t.Errorf("got %v want %v", got, want) + } + for _, data := range res { + if !slices.Contains(tt.expected, string(data)) { + t.Errorf("wrong data %s", data) + } + } + }) + + t.Run("GetLatestValue", func(t *testing.T) { + for _, it := range tt.items { + v, err := db.GetLatestValue(it.key) + if err != nil { + t.Error(err) + } + if got, want := string(v), it.data; got != want { + t.Errorf("got %v want %v", got, want) + } + } + }) + + t.Run("GetRowKeys", func(t *testing.T) { + for _, it := range tt.items { + keys, err := db.GetRowKeys(it.key) + if err != nil { + t.Error(err) + } + count, found := 0, false + for _, expected := range tt.items { + if !strings.HasPrefix(expected.key, it.key) { + continue + } + // don't count duplicate inputs since the add prevent duplicate keys + if expected.key == it.key && found { + continue + } + found = expected.key == it.key + count++ + if !slices.Contains(keys, expected.key) { + t.Errorf("missing %v in %v", expected.key, keys) + } + } + if got, want := len(keys), count; got != want { + t.Errorf("got %v want %v", got, want) + } + } + }) + }) + } + + if err := db.Close(); err != nil { + t.Errorf("cannot close db: %v", err) + } +} + +func TestRangeIncludeLimits(t *testing.T) { + tables := map[string][]string{"testTable": {"testFamily"}} + client, admin := storetest.NewBigTable(t) + store, err := NewBigTableWithClient(context.Background(), client, admin, tables) + if err != nil { + t.Fatal(err) + } + db := Wrap(store, "testTable", "testFamily") + + db.Add("1:999999999999", "", []byte("0"), false) + db.Add("1:999999999998", "", []byte("1"), false) + + rows, err := db.GetRowsRange("1:999999999999", "1:999999999998") + if err != nil { + 
t.Fatal(err) + } + if got, want := len(rows), 2; got != want { + t.Errorf("got %v want %v", got, want) + } +} diff --git a/db2/store/remote.go b/db2/store/remote.go new file mode 100644 index 0000000000..444c55c7cf --- /dev/null +++ b/db2/store/remote.go @@ -0,0 +1,171 @@ +package store + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" +) + +const ( + routeGetRowsRange = "/rowRange" + routeGetRow = "/row" +) + +type RemoteServer struct { + store Store +} + +func NewRemoteStore(store Store) RemoteServer { + return RemoteServer{store: store} +} + +func (api RemoteServer) Routes() http.Handler { + mux := http.NewServeMux() + mux.HandleFunc(routeGetRowsRange, api.GetRowsRange) + mux.HandleFunc(routeGetRow, api.GetRow) + + return mux +} + +type ParamsGetRowsRange struct { + High string `json:"high"` + Low string `json:"low"` +} + +func (api RemoteServer) GetRowsRange(w http.ResponseWriter, r *http.Request) { + var args ParamsGetRowsRange + err := json.NewDecoder(r.Body).Decode(&args) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(err.Error())) + return + } + rows, err := api.store.GetRowsRange(args.High, args.Low) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + data, _ := json.Marshal(rows) + _, _ = w.Write(data) +} + +type ParamsGetRow struct { + Key string `json:"key"` +} + +func (api RemoteServer) GetRow(w http.ResponseWriter, r *http.Request) { + var args ParamsGetRow + err := json.NewDecoder(r.Body).Decode(&args) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + _, _ = w.Write([]byte(err.Error())) + return + } + row, err := api.store.GetRow(args.Key) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + _, _ = w.Write([]byte(err.Error())) + return + } + data, _ := json.Marshal(row) + _, _ = w.Write(data) +} + +type RemoteClient struct { + url string +} + +func NewRemoteClient(url string) *RemoteClient { + return &RemoteClient{url: url} +} + +func (r RemoteClient) Add(key, column string, data []byte, allowDuplicate bool) error { + //TODO implement me + panic("implement me") +} + +func (r RemoteClient) BulkAdd(itemsByKey map[string][]Item) error { + //TODO implement me + panic("implement me") +} + +func (r RemoteClient) Read(prefix string) ([][]byte, error) { + //TODO implement me + panic("implement me") +} + +func (r RemoteClient) GetRow(key string) (map[string][]byte, error) { + b, err := json.Marshal(ParamsGetRow{Key: key}) + if err != nil { + return nil, err + } + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", r.url, routeGetRow), bytes.NewReader(b)) + if err != nil { + return nil, err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, b) + } + var row map[string][]byte + if err := json.NewDecoder(resp.Body).Decode(&row); err != nil { + return nil, err + } + return row, nil +} + +func (r RemoteClient) GetRowKeys(prefix string) ([]string, error) { + //TODO implement me + panic("implement me") +} + +func (r RemoteClient) GetLatestValue(key string) ([]byte, error) { + //TODO implement me + panic("implement me") +} + +func (r RemoteClient) GetRowsRange(high, low string) (map[string]map[string][]byte, error) { + b, err := json.Marshal(ParamsGetRowsRange{ + High: high, + Low: low, + }) + if err != nil { + return nil, err + } + 
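+	// send the JSON-encoded high/low range to the remote server's /rowRange route; the response body is the nested row -> column -> value map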
req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("%s%s", r.url, routeGetRowsRange), bytes.NewReader(b)) + if err != nil { + return nil, err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + b, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("unexpected status code %d: %s", resp.StatusCode, b) + } + var rows map[string]map[string][]byte + if err := json.NewDecoder(resp.Body).Decode(&rows); err != nil { + return nil, err + } + return rows, nil +} + +func (r RemoteClient) Close() error { + //TODO implement me + panic("implement me") +} + +func (r RemoteClient) Clear() error { + //TODO implement me + panic("implement me") +} diff --git a/db2/store/store.go b/db2/store/store.go new file mode 100644 index 0000000000..595ddb4f14 --- /dev/null +++ b/db2/store/store.go @@ -0,0 +1,18 @@ +package store + +type Store interface { + Add(key, column string, data []byte, allowDuplicate bool) error + BulkAdd(itemsByKey map[string][]Item) error + Read(prefix string) ([][]byte, error) + GetRow(key string) (map[string][]byte, error) + GetRowKeys(prefix string) ([]string, error) + GetLatestValue(key string) ([]byte, error) + GetRowsRange(high, low string) (map[string]map[string][]byte, error) + Close() error + Clear() error +} + +var ( + _ Store = (*TableWrapper)(nil) + _ Store = (*RemoteClient)(nil) +) diff --git a/db2/storetest/bigtable.go b/db2/storetest/bigtable.go new file mode 100644 index 0000000000..9c3780ead2 --- /dev/null +++ b/db2/storetest/bigtable.go @@ -0,0 +1,38 @@ +package storetest + +import ( + "context" + "testing" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/bttest" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" +) + +func NewBigTable(t testing.TB) (*bigtable.Client, *bigtable.AdminClient) { + srv, err := bttest.NewServer("localhost:0") + if err != nil { + t.Fatal(err) + } + ctx := context.Background() + + conn, err := grpc.NewClient(srv.Addr, grpc.WithTransportCredentials(insecure.NewCredentials())) + if err != nil { + t.Fatal(err) + } + + project, instance := "proj", "instance" + adminClient, err := bigtable.NewAdminClient(ctx, project, instance, option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + + client, err := bigtable.NewClientWithConfig(ctx, project, instance, bigtable.ClientConfig{}, option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + + return client, adminClient +} diff --git a/db2/tables.go b/db2/tables.go new file mode 100644 index 0000000000..63e6084464 --- /dev/null +++ b/db2/tables.go @@ -0,0 +1,23 @@ +package db2 + +const BlocksRawTable = "blocks-raw" + +const BT_COLUMNFAMILY_BLOCK = "b" +const BT_COLUMN_BLOCK = "b" +const BT_COLUMNFAMILY_RECEIPTS = "r" +const BT_COLUMN_RECEIPTS = "r" +const BT_COLUMNFAMILY_TRACES = "t" +const BT_COLUMN_TRACES = "t" +const BT_COLUMNFAMILY_UNCLES = "u" +const BT_COLUMN_UNCLES = "u" + +const MAX_EL_BLOCK_NUMBER = int64(1_000_000_000_000 - 1) + +var raw = map[string][]string{ + BlocksRawTable: { + BT_COLUMNFAMILY_BLOCK, + BT_COLUMNFAMILY_RECEIPTS, + BT_COLUMNFAMILY_TRACES, + BT_COLUMNFAMILY_UNCLES, + }, +} diff --git a/eth1data/eth1data_test.go b/eth1data/eth1data_test.go new file mode 100644 index 0000000000..35ea332c6d --- /dev/null +++ b/eth1data/eth1data_test.go @@ -0,0 +1,233 @@ +package eth1data + +import ( + "context" + "database/sql" + "errors" + "fmt" + "os" + "strings" + "testing" + "time" + + 
"github.com/ethereum/go-ethereum/common" + "github.com/go-redis/redis/v8" + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slices" + + "github.com/gobitfly/eth2-beaconchain-explorer/cache" + "github.com/gobitfly/eth2-beaconchain-explorer/db" + "github.com/gobitfly/eth2-beaconchain-explorer/price" + "github.com/gobitfly/eth2-beaconchain-explorer/rpc" + "github.com/gobitfly/eth2-beaconchain-explorer/types" + "github.com/gobitfly/eth2-beaconchain-explorer/utils" +) + +func TestGetEth1Transaction(t *testing.T) { + node, exists := os.LookupEnv("ERIGON_NODE") + if !exists { + t.Skip() + } + + erigon, err := rpc.NewErigonClient(node) + if err != nil { + t.Fatal(err) + } + rpc.CurrentErigonClient = erigon + + utils.Config = &types.Config{ + Chain: types.Chain{ + ClConfig: types.ClChainConfig{ + DepositChainID: 1, + }, + }, + Bigtable: types.Bigtable{ + Project: "test", + Instance: "instanceTest", + Emulator: true, + EmulatorPort: 8086, + EmulatorHost: "127.0.0.1", + }, + Frontend: types.Frontend{ + ElCurrencyDivisor: 1e18, + }, + } + cache.TieredCache = &noCache{} + db.ReaderDb = noSQLReaderDb{} + + price.Init(1, node, "ETH", "ETH") + + bt, err := db.InitBigtableWithCache(context.Background(), "test", "instanceTest", "1", noRedis{}) + if err != nil { + t.Fatal(err) + } + if err := db.InitBigtableSchema(); err != nil { + if !errors.Is(err, db.ErrTableAlreadyExist) { + t.Fatal(err) + } + } + + tests := []struct { + name string + block int64 + txHash string + revertIndexes []int + }{ + { + name: "no revert", + block: 20870689, + txHash: "0x45ae8f94592cd0fb20cd6b50c8def1b5d478c1968806f129dda881d3eb7b968e", + }, + { + name: "recursive revert", + block: 20183291, + txHash: "0xf7d385f000250c073dfef9a36327c5d30a4c77a0c50588ce3eded29f6829a4cd", + revertIndexes: []int{0, 1}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + block, _, err := erigon.GetBlock(tt.block, "geth") + if err != nil { + t.Fatal(err) + } + + if err := bt.SaveBlock(block); err != nil { + t.Fatal(err) + } + + res, err := GetEth1Transaction(common.HexToHash(tt.txHash), "ETH") + if err != nil { + t.Fatal(err) + } + + for i, internal := range res.InternalTxns { + if !slices.Contains(tt.revertIndexes, i) { + if strings.Contains(string(internal.TracePath), "Transaction failed") { + t.Errorf("internal transaction should not be flagged as failed") + } + continue + } + if !strings.Contains(string(internal.TracePath), "Transaction failed") { + t.Errorf("internal transaction should be flagged as failed") + } + } + }) + } +} + +type noRedis struct { +} + +func (n noRedis) SCard(ctx context.Context, key string) *redis.IntCmd { + return redis.NewIntCmd(ctx) +} + +func (n noRedis) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.BoolCmd { + return redis.NewBoolCmd(ctx) +} + +func (n noRedis) Pipeline() redis.Pipeliner { + //TODO implement me + panic("implement me") +} + +func (n noRedis) Get(ctx context.Context, key string) *redis.StringCmd { + cmd := redis.NewStringCmd(ctx) + cmd.SetErr(redis.Nil) + return cmd +} + +func (n noRedis) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *redis.StatusCmd { + return redis.NewStatusCmd(ctx) +} + +type noCache struct { +} + +func (n noCache) Set(key string, value interface{}, expiration time.Duration) error { + return nil +} + +func (n noCache) SetString(key string, value string, expiration time.Duration) error { + return nil + +} + +func (n noCache) SetUint64(key string, value uint64, expiration 
time.Duration) error { + return nil + +} + +func (n noCache) SetBool(key string, value bool, expiration time.Duration) error { + return nil + +} + +func (n noCache) Get(ctx context.Context, key string, returnValue any) (any, error) { + return nil, fmt.Errorf("no cache") +} + +func (n noCache) GetString(ctx context.Context, key string) (string, error) { + return "", fmt.Errorf("no cache") +} + +func (n noCache) GetUint64(ctx context.Context, key string) (uint64, error) { + return 0, fmt.Errorf("no cache") +} + +func (n noCache) GetBool(ctx context.Context, key string) (bool, error) { + return false, fmt.Errorf("no cache") +} + +func (n noCache) GetStringWithLocalTimeout(key string, localExpiration time.Duration) (string, error) { + return "", fmt.Errorf("no cache") +} + +func (n noCache) GetUint64WithLocalTimeout(key string, localExpiration time.Duration) (uint64, error) { + return 0, fmt.Errorf("no cache") +} + +func (n noCache) GetBoolWithLocalTimeout(key string, localExpiration time.Duration) (bool, error) { + return false, fmt.Errorf("no cache") +} + +func (n noCache) GetWithLocalTimeout(key string, localExpiration time.Duration, returnValue interface{}) (interface{}, error) { + return nil, fmt.Errorf("no cache") +} + +type noSQLReaderDb struct { +} + +func (n noSQLReaderDb) Close() error { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) Get(dest interface{}, query string, args ...interface{}) error { + return nil +} + +func (n noSQLReaderDb) Select(dest interface{}, query string, args ...interface{}) error { + return nil +} + +func (n noSQLReaderDb) Query(query string, args ...any) (*sql.Rows, error) { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) Preparex(query string) (*sqlx.Stmt, error) { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + //TODO implement me + panic("implement me") +} + +func (n noSQLReaderDb) Rebind(query string) string { + //TODO implement me + panic("implement me") +} diff --git a/go.mod b/go.mod index ca0cfdb995..f0d7fc46be 100644 --- a/go.mod +++ b/go.mod @@ -5,8 +5,8 @@ go 1.21 toolchain go1.22.0 require ( - cloud.google.com/go/bigtable v1.16.0 - cloud.google.com/go/secretmanager v1.11.5 + cloud.google.com/go/bigtable v1.33.0 + cloud.google.com/go/secretmanager v1.14.0 firebase.google.com/go/v4 v4.14.1 github.com/ClickHouse/clickhouse-go/v2 v2.30.0 github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 @@ -29,6 +29,7 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang/protobuf v1.5.4 github.com/gomodule/redigo v1.8.0 + github.com/google/go-cmp v0.6.0 github.com/gorilla/context v1.1.1 github.com/gorilla/csrf v1.7.0 github.com/gorilla/mux v1.8.0 @@ -71,9 +72,10 @@ require ( golang.org/x/crypto v0.28.0 golang.org/x/sync v0.8.0 golang.org/x/text v0.19.0 - golang.org/x/time v0.5.0 - google.golang.org/api v0.170.0 - google.golang.org/protobuf v1.33.0 + golang.org/x/time v0.6.0 + google.golang.org/api v0.197.0 + google.golang.org/grpc v1.66.2 + google.golang.org/protobuf v1.34.2 gopkg.in/yaml.v3 v3.0.1 ) @@ -84,11 +86,14 @@ require ( ) require ( - cloud.google.com/go v0.112.1 // indirect - cloud.google.com/go/compute v1.24.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.7 // indirect - cloud.google.com/go/longrunning v0.5.5 // indirect + cel.dev/expr v0.16.0 // indirect + cloud.google.com/go v0.115.1 // 
indirect + cloud.google.com/go/auth v0.9.3 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect + cloud.google.com/go/iam v1.2.1 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect + cloud.google.com/go/monitoring v1.21.1 // indirect github.com/ClickHouse/ch-go v0.61.5 // indirect github.com/DataDog/zstd v1.5.2 // indirect github.com/MicahParks/keyfunc v1.9.0 // indirect @@ -111,8 +116,7 @@ require ( github.com/bits-and-blooms/bitset v1.11.0 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect - github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20230928194634-aa077af62593 // indirect @@ -126,8 +130,8 @@ require ( github.com/deckarep/golang-set/v2 v2.5.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/envoyproxy/go-control-plane v0.12.0 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect @@ -136,14 +140,14 @@ require ( github.com/glendc/go-external-ip v0.1.0 // indirect github.com/go-faster/city v1.0.1 // indirect github.com/go-faster/errors v0.7.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/s2a-go v0.1.7 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huandu/go-clone v1.6.0 // indirect @@ -182,6 +186,7 @@ require ( github.com/paulmach/orb v0.11.1 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/protolambda/zssz v0.1.5 // indirect github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 // indirect @@ -196,32 +201,34 @@ require ( github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect - go.opentelemetry.io/otel v1.26.0 // indirect - go.opentelemetry.io/otel/metric v1.26.0 // indirect - 
go.opentelemetry.io/otel/trace v1.26.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect + go.opentelemetry.io/otel v1.29.0 // indirect + go.opentelemetry.io/otel/metric v1.29.0 // indirect + go.opentelemetry.io/otel/sdk v1.29.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.29.0 // indirect + go.opentelemetry.io/otel/trace v1.29.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect google.golang.org/appengine/v2 v2.0.2 // indirect - google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect - google.golang.org/grpc v1.62.1 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect lukechampine.com/blake3 v1.2.1 // indirect + rsc.io/binaryregexp v0.2.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) require ( - cloud.google.com/go/firestore v1.15.0 // indirect - cloud.google.com/go/storage v1.40.0 // indirect + cloud.google.com/go/firestore v1.16.0 // indirect + cloud.google.com/go/storage v1.43.0 // indirect github.com/BurntSushi/toml v1.2.1 // indirect github.com/KyleBanks/depth v1.2.1 // indirect github.com/attestantio/go-eth2-client v0.19.9 github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coocood/freecache v1.2.3 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/fatih/color v1.16.0 // indirect @@ -233,11 +240,11 @@ require ( github.com/go-openapi/spec v0.20.14 // indirect github.com/go-openapi/swag v0.22.9 // indirect github.com/goccy/go-yaml v1.10.0 // indirect - github.com/golang/glog v1.2.0 // indirect + github.com/golang/glog v1.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 - github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect github.com/hashicorp/go-version v1.6.0 @@ -269,11 +276,10 @@ require ( github.com/tklauser/numcpus v0.7.0 // indirect go.opencensus.io v0.24.0 // indirect golang.org/x/net v0.30.0 - golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/sys v0.26.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/appengine v1.6.8 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 434bae3937..72b373c73c 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +cel.dev/expr v0.16.0 h1:yloc84fytn4zmJX2GU3TkXGsaieaV7dQ057Qs4sIG2Y= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= 
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -13,42 +15,46 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= -cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.3 h1:VOEUIAADkkLtyfr3BLa3R8Ed/j6w1jTBmARx+wb5w5U= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.16.0 h1:sqJhhslzQOag49Mf2/uH3+u+NdfpPX0gjKAcgYpRUCU= -cloud.google.com/go/bigtable v1.16.0/go.mod h1:6f7WVXfeZaJz0xevUZoTA1s8sTmmrQqIAkRDVEHVg7I= -cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= -cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/bigtable v1.33.0 h1:2BDaWLRAwXO14DJL/u8crbV2oUbMZkIa2eGq8Yao1bk= +cloud.google.com/go/bigtable v1.33.0/go.mod h1:HtpnH4g25VT1pejHRtInlFPnN5sjTxbQlsYBjh9t5l0= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= +cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8= -cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= -cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= -cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= -cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= -cloud.google.com/go/longrunning v0.5.5/go.mod 
h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= +cloud.google.com/go/firestore v1.16.0 h1:YwmDHcyrxVRErWcgxunzEaZxtNbc8QoFYA/JOEwDPgc= +cloud.google.com/go/firestore v1.16.0/go.mod h1:+22v/7p+WNBSQwdSwP57vz47aZiY+HrDkrOsJNhk7rg= +cloud.google.com/go/iam v1.2.1 h1:QFct02HRb7H12J/3utj0qf5tobFh9V4vR6h9eX5EBRU= +cloud.google.com/go/iam v1.2.1/go.mod h1:3VUIJDPpwT6p/amXRC5GY8fCCh70lxPygguVtI0Z4/g= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= -cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= +cloud.google.com/go/secretmanager v1.14.0 h1:P2RRu2NEsQyOjplhUPvWKqzDXUKzwejHLuSUBHI8c4w= +cloud.google.com/go/secretmanager v1.14.0/go.mod h1:q0hSFHzoW7eRgyYFH8trqEFavgrMeiJI4FETNN78vhM= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= +cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -178,19 +184,17 @@ github.com/cespare/cp v1.1.1/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= -github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59 h1:fLZ97KE86ELjEYJCEUVzmbhfzDxHHGwBrDVMd4XL6Bs= +github.com/cncf/xds/go v0.0.0-20240822171458-6449f94b4d59/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/datadriven v1.0.3-0.20230413201302-be42291fc80f h1:otljaYPt5hWxV3MUfO5dFPFiOXg9CyG5/kCfayTqsJ4= @@ -270,11 +274,11 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= -github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= +github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= +github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= -github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= +github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= +github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/ethereum/c-kzg-4844 v0.4.0 h1:3MS1s4JtA868KpJxroZoepdV0ZKBp3u/O5HcZ7R3nlY= github.com/ethereum/c-kzg-4844 v0.4.0/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0= @@ -341,8 +345,8 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= @@ -409,8 +413,8 @@ github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= -github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4= +github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -452,8 +456,8 @@ github.com/gomodule/redigo v1.8.0 h1:OXfLQ/k8XpYF8f8sZKd2Df4SDyzbLeC35OsBsB11rYg github.com/gomodule/redigo v1.8.0/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -478,8 +482,8 @@ github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -489,18 +493,18 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= -github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= -github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -946,6 +950,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -1179,7 
+1185,6 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= @@ -1195,18 +1200,20 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= -go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= +go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= +go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= +go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= +go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= +go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= +go.opentelemetry.io/otel/sdk/metric v1.29.0 h1:K2CfmJohnRgvZ9UAj2/FhIf/okdWcNdBwe1m8xFXiSY= +go.opentelemetry.io/otel/sdk/metric v1.29.0/go.mod h1:6zZLdCl2fkauYoZIOn/soQIDSWFmNSRcICarHfuhNJQ= +go.opentelemetry.io/otel/trace 
v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= +go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1289,7 +1296,6 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1342,7 +1348,6 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1351,8 +1356,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1363,7 +1368,6 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.8.0 
h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1435,7 +1439,6 @@ golang.org/x/sys v0.0.0-20220406163625-3f8b81556e12/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1458,15 +1461,14 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1525,7 +1527,6 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1553,8 +1554,8 @@ google.golang.org/api v0.24.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= -google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= +google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1598,12 +1599,12 @@ google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210207032614-bba0dbe2a9ea/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210426193834-eac7f76ac494/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= -google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237 h1:RFiFrvy37/mpSpdySBDrUdipW/dHwsRwh3J3+A9VgT4= -google.golang.org/genproto/googleapis/api v0.0.0-20240318140521-94a12d6c2237/go.mod h1:Z5Iiy3jtmioajWHDGFk7CeugTyHtPvMHA4UTmUkyalE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc= +google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1622,8 +1623,8 @@ google.golang.org/grpc v1.35.0-dev.0.20201218190559-666aea1fb34c/go.mod h1:qjiiY google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= 
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= +google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= @@ -1640,8 +1641,8 @@ google.golang.org/protobuf v1.25.1-0.20201208041424-160c7477e0e8/go.mod h1:hFxJC google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= diff --git a/handlers/eth1Block.go b/handlers/eth1Block.go index 39b644cdc1..8485173833 100644 --- a/handlers/eth1Block.go +++ b/handlers/eth1Block.go @@ -126,7 +126,7 @@ func Eth1Block(w http.ResponseWriter, r *http.Request) { func GetExecutionBlockPageData(number uint64, limit int) (*types.Eth1BlockPageData, error) { block, err := db.BigtableClient.GetBlockFromBlocksTable(number) if diffToHead := int64(services.LatestEth1BlockNumber()) - int64(number); err != nil && diffToHead < 0 && diffToHead >= -5 { - block, _, err = rpc.CurrentErigonClient.GetBlock(int64(number), "parity/geth") + block, _, err = rpc.CurrentErigonClient.GetBlock(int64(number), "geth") } if err != nil { return nil, err @@ -188,10 +188,19 @@ func GetExecutionBlockPageData(number uint64, limit int) (*types.Eth1BlockPageDa if len(contractInteractionTypes) > i { contractInteraction = contractInteractionTypes[i] } + status := types.StatusType(tx.Status) + if status == types.StatusType_SUCCESS { + for _, itx := range tx.Itx { + if itx.ErrorMsg != "" { + status = types.StatusType_PARTIAL + break + } + } + } txs = append(txs, types.Eth1BlockPageTransaction{ Hash: fmt.Sprintf("%#x", tx.Hash), - HashFormatted: utils.FormatTransactionHash(tx.Hash, tx.ErrorMsg == ""), + HashFormatted: utils.FormatTransactionHashFromStatus(tx.Hash, status), From: fmt.Sprintf("%#x", tx.From), FromFormatted: utils.FormatAddressWithLimits(tx.From, names[string(tx.From)], false, "address", 15, 20, true), To: fmt.Sprintf("%#x", tx.To), diff --git a/rpc/erigon.go b/rpc/erigon.go index 49ea7f16b4..6513d0b1f4 100644 --- a/rpc/erigon.go +++ b/rpc/erigon.go @@ -3,12 +3,19 @@ package rpc import ( "context" "encoding/hex" + "encoding/json" "fmt" "math/big" + "net/http" + "os" + "strconv" "strings" + "sync" "time" "github.com/gobitfly/eth2-beaconchain-explorer/contracts/oneinchoracle" + "github.com/gobitfly/eth2-beaconchain-explorer/db2" + "github.com/gobitfly/eth2-beaconchain-explorer/db2/store" 
"github.com/gobitfly/eth2-beaconchain-explorer/erc20" "github.com/gobitfly/eth2-beaconchain-explorer/metrics" "github.com/gobitfly/eth2-beaconchain-explorer/types" @@ -34,6 +41,8 @@ type ErigonClient struct { ethClient *ethclient.Client chainID *big.Int multiChecker *Balance + + rawStore db2.RawStoreReader } var CurrentErigonClient *ErigonClient @@ -44,17 +53,40 @@ func NewErigonClient(endpoint string) (*ErigonClient, error) { endpoint: endpoint, } - rpcClient, err := geth_rpc.Dial(client.endpoint) - if err != nil { - return nil, fmt.Errorf("error dialing rpc node: %w", err) + var opts []geth_rpc.ClientOption + if utils.Config != nil { + if utils.Config.RawBigtable.Bigtable.Project != "" && utils.Config.RawBigtable.Bigtable.Instance != "" { + if utils.Config.RawBigtable.Bigtable.Emulator { + err := os.Setenv("BIGTABLE_EMULATOR_HOST", fmt.Sprintf("%s:%d", utils.Config.RawBigtable.Bigtable.EmulatorHost, utils.Config.RawBigtable.Bigtable.EmulatorPort)) + if err != nil { + return nil, fmt.Errorf("error while setting BIGTABLE_EMULATOR_HOST env: %w", err) + } + } + project, instance := utils.Config.RawBigtable.Bigtable.Project, utils.Config.RawBigtable.Bigtable.Instance + var db store.Store + bt, err := store.NewBigTable(project, instance, nil) + if err != nil { + return nil, err + } + db = store.Wrap(bt, db2.BlocksRawTable, "") + if utils.Config.RawBigtable.Remote != "" { + db = store.NewRemoteClient(utils.Config.RawBigtable.Remote) + } + rawStore := db2.WithCache(db2.NewRawStore(db)) + roundTripper := db2.NewBigTableEthRaw(rawStore, utils.Config.Chain.Id) + opts = append(opts, geth_rpc.WithHTTPClient(&http.Client{ + Transport: db2.NewWithFallback(roundTripper, http.DefaultTransport), + })) + client.rawStore = rawStore + } } - client.rpcClient = rpcClient - ethClient, err := ethclient.Dial(client.endpoint) + rpcClient, err := geth_rpc.DialOptions(context.Background(), client.endpoint, opts...) 
if err != nil { return nil, fmt.Errorf("error dialing rpc node: %w", err) } - client.ethClient = ethClient + client.rpcClient = rpcClient + client.ethClient = ethclient.NewClient(rpcClient) client.multiChecker, err = NewBalance(common.HexToAddress("0xb1F8e55c7f64D203C1400B9D8555d050F94aDF39"), client.ethClient) if err != nil { @@ -89,110 +121,110 @@ func (client *ErigonClient) GetRPCClient() *geth_rpc.Client { return client.rpcClient } +type minimalBlock struct { + Hash string `json:"hash"` +} + func (client *ErigonClient) GetBlock(number int64, traceMode string) (*types.Eth1Block, *types.GetBlockTimings, error) { - startTime := time.Now() + start := time.Now() + timings := &types.GetBlockTimings{} + mu := sync.Mutex{} + defer func() { - metrics.TaskDuration.WithLabelValues("rpc_el_get_block").Observe(time.Since(startTime).Seconds()) + metrics.TaskDuration.WithLabelValues("rpc_el_get_block").Observe(time.Since(start).Seconds()) }() ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) defer cancel() - start := time.Now() - timings := &types.GetBlockTimings{} - - block, err := client.ethClient.BlockByNumber(ctx, big.NewInt(int64(number))) - if err != nil { + var traces []*Eth1InternalTransactionWithPosition + var block *geth_types.Block + var receipts []*geth_types.Receipt + g := new(errgroup.Group) + g.Go(func() error { + b, err := client.ethClient.BlockByNumber(ctx, big.NewInt(number)) + if err != nil { + return err + } + mu.Lock() + timings.Headers = time.Since(start) + mu.Unlock() + block = b + return nil + }) + g.Go(func() error { + if err := client.rpcClient.CallContext(ctx, &receipts, "eth_getBlockReceipts", fmt.Sprintf("0x%x", number)); err != nil { + return fmt.Errorf("error retrieving receipts for block %v: %w", number, err) + } + mu.Lock() + timings.Receipts = time.Since(start) + mu.Unlock() + return nil + }) + g.Go(func() error { + t, err := client.getTrace(traceMode, big.NewInt(number)) + if err != nil { + return fmt.Errorf("error retrieving traces for block %v: %w", number, err) + } + traces = t + mu.Lock() + timings.Traces = time.Since(start) + mu.Unlock() + return nil + }) + if err := g.Wait(); err != nil { return nil, nil, err } - - timings.Headers = time.Since(start) - start = time.Now() - - c := &types.Eth1Block{ - Hash: block.Hash().Bytes(), - ParentHash: block.ParentHash().Bytes(), - UncleHash: block.UncleHash().Bytes(), - Coinbase: block.Coinbase().Bytes(), - Root: block.Root().Bytes(), - TxHash: block.TxHash().Bytes(), - ReceiptHash: block.ReceiptHash().Bytes(), - Difficulty: block.Difficulty().Bytes(), - Number: block.NumberU64(), - GasLimit: block.GasLimit(), - GasUsed: block.GasUsed(), - Time: timestamppb.New(time.Unix(int64(block.Time()), 0)), - Extra: block.Extra(), - MixDigest: block.MixDigest().Bytes(), - Bloom: block.Bloom().Bytes(), - Uncles: []*types.Eth1Block{}, - Transactions: []*types.Eth1Transaction{}, - Withdrawals: []*types.Eth1Withdrawal{}, - } - blobGasUsed := block.BlobGasUsed() - if blobGasUsed != nil { - c.BlobGasUsed = *blobGasUsed - } - excessBlobGas := block.ExcessBlobGas() - if excessBlobGas != nil { - c.ExcessBlobGas = *excessBlobGas - } - - if block.BaseFee() != nil { - c.BaseFee = block.BaseFee().Bytes() - } - - for _, uncle := range block.Uncles() { - pbUncle := &types.Eth1Block{ - Hash: uncle.Hash().Bytes(), - ParentHash: uncle.ParentHash.Bytes(), - UncleHash: uncle.UncleHash.Bytes(), - Coinbase: uncle.Coinbase.Bytes(), - Root: uncle.Root.Bytes(), - TxHash: uncle.TxHash.Bytes(), - ReceiptHash: uncle.ReceiptHash.Bytes(), 
- Difficulty: uncle.Difficulty.Bytes(),
- Number: uncle.Number.Uint64(),
- GasLimit: uncle.GasLimit,
- GasUsed: uncle.GasUsed,
- Time: timestamppb.New(time.Unix(int64(uncle.Time), 0)),
- Extra: uncle.Extra,
- MixDigest: uncle.MixDigest.Bytes(),
- Bloom: uncle.Bloom.Bytes(),
+ // we cannot trust block.Hash(): some chains (gnosis) have extra fields that are included in the hash computation,
+ // so we extract it from the receipts, or from the node again if there is no receipt (this should be very rare)
+ var blockHash common.Hash
+ if len(receipts) != 0 {
+ blockHash = receipts[0].BlockHash
+ } else {
+ var res minimalBlock
+ if err := client.rpcClient.CallContext(ctx, &res, "eth_getBlockByNumber", fmt.Sprintf("0x%x", number), false); err != nil {
+ return nil, nil, fmt.Errorf("error retrieving blockHash %v: %w", number, err)
}
-
- c.Uncles = append(c.Uncles, pbUncle)
+ blockHash = common.HexToHash(res.Hash)
}
- receipts := make([]*geth_types.Receipt, len(block.Transactions()))
-
- if len(block.Withdrawals()) > 0 {
- withdrawalsIndexed := make([]*types.Eth1Withdrawal, 0, len(block.Withdrawals()))
- for _, w := range block.Withdrawals() {
- withdrawalsIndexed = append(withdrawalsIndexed, &types.Eth1Withdrawal{
- Index: w.Index,
- ValidatorIndex: w.Validator,
- Address: w.Address.Bytes(),
- Amount: new(big.Int).SetUint64(w.Amount).Bytes(),
- })
+ withdrawals := make([]*types.Eth1Withdrawal, len(block.Withdrawals()))
+ for i, withdrawal := range block.Withdrawals() {
+ withdrawals[i] = &types.Eth1Withdrawal{
+ Index: withdrawal.Index,
+ ValidatorIndex: withdrawal.Validator,
+ Address: withdrawal.Address.Bytes(),
+ Amount: new(big.Int).SetUint64(withdrawal.Amount).Bytes(),
}
- c.Withdrawals = withdrawalsIndexed
}
- txs := block.Transactions()
-
- for _, tx := range txs {
+ transactions := make([]*types.Eth1Transaction, len(block.Transactions()))
+ traceIndex := 0
+ if len(receipts) != len(block.Transactions()) {
+ return nil, nil, fmt.Errorf("block %s receipts length [%d] mismatch with transactions length [%d]", block.Number(), len(receipts), len(block.Transactions()))
+ }
+ for txPosition, receipt := range receipts {
+ logs := make([]*types.Eth1Log, len(receipt.Logs))
+ for i, log := range receipt.Logs {
+ topics := make([][]byte, len(log.Topics))
+ for j, topic := range log.Topics {
+ topics[j] = topic.Bytes()
+ }
+ logs[i] = &types.Eth1Log{
+ Address: log.Address.Bytes(),
+ Data: log.Data,
+ Removed: log.Removed,
+ Topics: topics,
+ }
+ }
- var from []byte
- sender, err := geth_types.Sender(geth_types.NewCancunSigner(tx.ChainId()), tx)
- if err != nil {
- from, _ = hex.DecodeString("abababababababababababababababababababab")
- logrus.Errorf("error converting tx %v to msg: %v", tx.Hash(), err)
- } else {
- from = sender.Bytes()
+ var internals []*types.Eth1InternalTransaction
+ for ; traceIndex < len(traces) && traces[traceIndex].txPosition == txPosition; traceIndex++ {
+ internals = append(internals, &traces[traceIndex].Eth1InternalTransaction)
}
- pbTx := &types.Eth1Transaction{
+ tx := block.Transactions()[txPosition]
+ transactions[txPosition] = &types.Eth1Transaction{
Type: uint32(tx.Type()),
Nonce: tx.Nonce(),
GasPrice: tx.GasPrice().Bytes(),
@@ -201,175 +233,131 @@ func (client *ErigonClient) GetBlock(number int64, traceMode string) (*types.Eth
Gas: tx.Gas(),
Value: tx.Value().Bytes(),
Data: tx.Data(),
- From: from,
- ChainId: tx.ChainId().Bytes(),
- AccessList: []*types.AccessList{},
- Hash: tx.Hash().Bytes(),
- Itx: []*types.Eth1InternalTransaction{},
- BlobVersionedHashes: [][]byte{},
- }
-
- if 
tx.BlobGasFeeCap() != nil { - pbTx.MaxFeePerBlobGas = tx.BlobGasFeeCap().Bytes() - } - for _, h := range tx.BlobHashes() { - pbTx.BlobVersionedHashes = append(pbTx.BlobVersionedHashes, h.Bytes()) - } - - if tx.To() != nil { - pbTx.To = tx.To().Bytes() - } - - c.Transactions = append(c.Transactions, pbTx) - - } - - g := new(errgroup.Group) - - g.Go(func() error { - if block.NumberU64() == 0 { // genesis block is not traceable - return nil - } - - var traceError error - if traceMode == "parity" || traceMode == "parity/geth" { - traces, err := client.TraceParity(block.NumberU64()) - - if err != nil { - if traceMode == "parity" { - return fmt.Errorf("error tracing block via parity style traces (%v), %v: %w", block.Number(), block.Hash(), err) - } else { - logger.Errorf("error tracing block via parity style traces (%v), %v: %v", block.Number(), block.Hash(), err) - - } - traceError = err - } else { - for _, trace := range traces { - if trace.Type == "reward" { - continue - } - - if trace.TransactionHash == "" { - continue - } - - if trace.TransactionPosition >= len(c.Transactions) { - return fmt.Errorf("error transaction position %v out of range", trace.TransactionPosition) - } - - if trace.Error == "" { - c.Transactions[trace.TransactionPosition].Status = 1 - } else { - c.Transactions[trace.TransactionPosition].Status = 0 - c.Transactions[trace.TransactionPosition].ErrorMsg = trace.Error - } - - tracePb := &types.Eth1InternalTransaction{ - Type: trace.Type, - Path: fmt.Sprint(trace.TraceAddress), - } - - tracePb.From, tracePb.To, tracePb.Value, tracePb.Type = trace.ConvertFields() - c.Transactions[trace.TransactionPosition].Itx = append(c.Transactions[trace.TransactionPosition].Itx, tracePb) + To: func() []byte { + if tx.To() != nil { + return tx.To().Bytes() } - } - } - - if traceMode == "geth" || (traceError != nil && traceMode == "parity/geth") { - - gethTraceData, err := client.TraceGeth(block.Hash()) - - if err != nil { - return fmt.Errorf("error tracing block via geth style traces (%v), %v: %w", block.Number(), block.Hash(), err) - } - - // logger.Infof("retrieved %v calls via geth", len(gethTraceData)) - - for _, trace := range gethTraceData { - if trace.Error == "" { - c.Transactions[trace.TransactionPosition].Status = 1 - } else { - c.Transactions[trace.TransactionPosition].Status = 0 - c.Transactions[trace.TransactionPosition].ErrorMsg = trace.Error + return nil + }(), + From: func() []byte { + // this won't make a request in most cases as the sender is already present in the cache + // context https://github.com/ethereum/go-ethereum/blob/v1.14.11/ethclient/ethclient.go#L268 + sender, err := client.ethClient.TransactionSender(context.Background(), tx, blockHash, uint(txPosition)) + if err != nil { + sender = common.HexToAddress("abababababababababababababababababababab") + logrus.Errorf("could not retrieve tx sender %v: %v", tx.Hash(), err) } - - if trace.Type == "CREATE2" { - trace.Type = "CREATE" + return sender.Bytes() + }(), + ChainId: tx.ChainId().Bytes(), + AccessList: []*types.AccessList{}, + Hash: tx.Hash().Bytes(), + ContractAddress: receipt.ContractAddress[:], + CommulativeGasUsed: receipt.CumulativeGasUsed, + GasUsed: receipt.GasUsed, + LogsBloom: receipt.Bloom[:], + Status: receipt.Status, + Logs: logs, + Itx: internals, + MaxFeePerBlobGas: func() []byte { + if tx.BlobGasFeeCap() != nil { + return tx.BlobGasFeeCap().Bytes() } - - tracePb := &types.Eth1InternalTransaction{ - Type: strings.ToLower(trace.Type), - Path: "0", + return nil + }(), + BlobVersionedHashes: func() 
(b [][]byte) { + for _, h := range tx.BlobHashes() { + b = append(b, h.Bytes()) } - - tracePb.From = trace.From.Bytes() - tracePb.To = trace.To.Bytes() - tracePb.Value = common.FromHex(trace.Value) - if trace.Type == "CREATE" { - } else if trace.Type == "SELFDESTRUCT" { - } else if trace.Type == "SUICIDE" { - } else if trace.Type == "CALL" || trace.Type == "DELEGATECALL" || trace.Type == "STATICCALL" { - } else if trace.Type == "" { - logrus.WithFields(logrus.Fields{"type": trace.Type, "block.Number": block.Number(), "block.Hash": block.Hash()}).Errorf("geth style trace without type") - spew.Dump(trace) - continue - } else { - spew.Dump(trace) - logrus.Fatalf("unknown trace type %v in tx %v", trace.Type, trace.TransactionPosition) + return b + }(), + BlobGasPrice: func() []byte { + if receipt.BlobGasPrice != nil { + return receipt.BlobGasPrice.Bytes() } - - logger.Tracef("appending trace %v to tx %x from %v to %v value %v", trace.TransactionPosition, c.Transactions[trace.TransactionPosition].Hash, trace.From, trace.To, trace.Value) - - c.Transactions[trace.TransactionPosition].Itx = append(c.Transactions[trace.TransactionPosition].Itx, tracePb) - } + return nil + }(), + BlobGasUsed: receipt.BlobGasUsed, } - - timings.Traces = time.Since(start) - - // logrus.Infof("retrieved %v traces for %v txs", len(traces), len(c.Transactions)) - - return nil - }) - - if err = client.rpcClient.CallContext(ctx, &receipts, "eth_getBlockReceipts", fmt.Sprintf("0x%x", block.NumberU64())); err != nil { - return nil, nil, fmt.Errorf("error retrieving receipts for block %v: %w", block.Number(), err) } - timings.Receipts = time.Since(start) - start = time.Now() - - for i, r := range receipts { - c.Transactions[i].ContractAddress = r.ContractAddress[:] - c.Transactions[i].CommulativeGasUsed = r.CumulativeGasUsed - c.Transactions[i].GasUsed = r.GasUsed - c.Transactions[i].LogsBloom = r.Bloom[:] - c.Transactions[i].Logs = make([]*types.Eth1Log, 0, len(r.Logs)) - - if r.BlobGasPrice != nil { - c.Transactions[i].BlobGasPrice = r.BlobGasPrice.Bytes() + uncles := make([]*types.Eth1Block, len(block.Uncles())) + for i, uncle := range block.Uncles() { + uncles[i] = &types.Eth1Block{ + Hash: uncle.Hash().Bytes(), + ParentHash: uncle.ParentHash.Bytes(), + UncleHash: uncle.UncleHash.Bytes(), + Coinbase: uncle.Coinbase.Bytes(), + Root: uncle.Root.Bytes(), + TxHash: uncle.TxHash.Bytes(), + ReceiptHash: uncle.ReceiptHash.Bytes(), + Difficulty: uncle.Difficulty.Bytes(), + Number: uncle.Number.Uint64(), + GasLimit: uncle.GasLimit, + GasUsed: uncle.GasUsed, + Time: timestamppb.New(time.Unix(int64(uncle.Time), 0)), + Extra: uncle.Extra, + MixDigest: uncle.MixDigest.Bytes(), + Bloom: uncle.Bloom.Bytes(), } - c.Transactions[i].BlobGasUsed = r.BlobGasUsed + } - for _, l := range r.Logs { - pbLog := &types.Eth1Log{ - Address: l.Address.Bytes(), - Data: l.Data, - Removed: l.Removed, - Topics: make([][]byte, 0, len(l.Topics)), + return &types.Eth1Block{ + Hash: blockHash.Bytes(), + ParentHash: block.ParentHash().Bytes(), + UncleHash: block.UncleHash().Bytes(), + Coinbase: block.Coinbase().Bytes(), + Root: block.Root().Bytes(), + TxHash: block.TxHash().Bytes(), + ReceiptHash: block.ReceiptHash().Bytes(), + Difficulty: block.Difficulty().Bytes(), + Number: block.NumberU64(), + GasLimit: block.GasLimit(), + GasUsed: block.GasUsed(), + Time: timestamppb.New(time.Unix(int64(block.Time()), 0)), + Extra: block.Extra(), + MixDigest: block.MixDigest().Bytes(), + Bloom: block.Bloom().Bytes(), + BaseFee: func() []byte { + if block.BaseFee() != nil 
{ + return block.BaseFee().Bytes() } - - for _, t := range l.Topics { - pbLog.Topics = append(pbLog.Topics, t.Bytes()) + return nil + }(), + Uncles: uncles, + Transactions: transactions, + Withdrawals: withdrawals, + BlobGasUsed: func() uint64 { + blobGasUsed := block.BlobGasUsed() + if blobGasUsed != nil { + return *blobGasUsed } - c.Transactions[i].Logs = append(c.Transactions[i].Logs, pbLog) - } - } + return 0 + }(), + ExcessBlobGas: func() uint64 { + excessBlobGas := block.ExcessBlobGas() + if excessBlobGas != nil { + return *excessBlobGas + } + return 0 + }(), + }, timings, nil +} - if err := g.Wait(); err != nil { - return nil, nil, fmt.Errorf("error retrieving traces for block %v: %w", block.Number(), err) +func (client *ErigonClient) GetBlocks(start, end int64, traceMode string) ([]*types.Eth1Block, error) { + _, err := client.rawStore.ReadBlocksByNumber(client.chainID.Uint64(), start, end) + if err != nil { + return nil, err } - - return c, timings, nil + blocks := make([]*types.Eth1Block, end-start+1) + for i := start; i <= end; i++ { + block, _, err := client.GetBlock(i, traceMode) + if err != nil { + return nil, err + } + blocks[i-start] = block + } + return blocks, nil } func (client *ErigonClient) GetBlockNumberByHash(hash string) (uint64, error) { @@ -443,10 +431,10 @@ func extractCalls(r *GethTraceCallResult, d *[]*GethTraceCallResult) { } } -func (client *ErigonClient) TraceGeth(blockHash common.Hash) ([]*GethTraceCallResult, error) { +func (client *ErigonClient) TraceGeth(blockNumber *big.Int) ([]*GethTraceCallResult, error) { var res []*GethTraceCallResultWrapper - err := client.rpcClient.Call(&res, "debug_traceBlockByHash", blockHash, gethTracerArg) + err := client.rpcClient.Call(&res, "debug_traceBlockByNumber", hexutil.EncodeBig(blockNumber), gethTracerArg) if err != nil { return nil, err } @@ -756,3 +744,431 @@ func toCallArg(msg ethereum.CallMsg) interface{} { } return arg } + +func (client *ErigonClient) getTrace(traceMode string, blockNumber *big.Int) ([]*Eth1InternalTransactionWithPosition, error) { + if blockNumber.Uint64() == 0 { // genesis block is not traceable + return nil, nil + } + switch traceMode { + case "parity": + return client.getTraceParity(blockNumber) + case "parity/geth": + traces, err := client.getTraceParity(blockNumber) + if err == nil { + return traces, nil + } + logger.Errorf("error tracing block via parity style traces (%v): %v", blockNumber, err) + // fallback to geth traces + fallthrough + case "geth": + return client.getTraceGeth(blockNumber) + } + return nil, fmt.Errorf("unknown trace mode '%s'", traceMode) +} + +func (client *ErigonClient) getTraceParity(blockNumber *big.Int) ([]*Eth1InternalTransactionWithPosition, error) { + traces, err := client.TraceParity(blockNumber.Uint64()) + if err != nil { + return nil, fmt.Errorf("error tracing block via parity style traces (%v): %w", blockNumber, err) + } + + var indexedTraces []*Eth1InternalTransactionWithPosition + for _, trace := range traces { + if trace.Type == "reward" { + continue + } + if trace.TransactionHash == "" { + continue + } + + from, to, value, traceType := trace.ConvertFields() + indexedTraces = append(indexedTraces, &Eth1InternalTransactionWithPosition{ + Eth1InternalTransaction: types.Eth1InternalTransaction{ + Type: traceType, + From: from, + To: to, + Value: value, + ErrorMsg: trace.Error, + Path: fmt.Sprint(trace.TraceAddress), + }, + txPosition: trace.TransactionPosition, + }) + } + return indexedTraces, nil +} + +func (client *ErigonClient) getTraceGeth(blockNumber 
*big.Int) ([]*Eth1InternalTransactionWithPosition, error) {
+    traces, err := client.TraceGeth(blockNumber)
+    if err != nil {
+        return nil, fmt.Errorf("error tracing block via geth style traces (%v): %w", blockNumber, err)
+    }
+
+    var indexedTraces []*Eth1InternalTransactionWithPosition
+    var txPosition int
+    paths := make(map[*GethTraceCallResult]string)
+    for i, trace := range traces {
+        switch trace.Type {
+        case "CREATE2":
+            trace.Type = "CREATE"
+        case "CREATE", "SELFDESTRUCT", "SUICIDE", "CALL", "DELEGATECALL", "STATICCALL", "CALLCODE":
+        case "":
+            logrus.WithFields(logrus.Fields{"type": trace.Type, "block.Number": blockNumber}).Errorf("geth style trace without type")
+            spew.Dump(trace)
+            continue
+        default:
+            spew.Dump(trace)
+            logrus.Fatalf("unknown trace type %v in tx %v:%v", trace.Type, blockNumber.String(), trace.TransactionPosition)
+        }
+        if txPosition != trace.TransactionPosition {
+            txPosition = trace.TransactionPosition
+            paths = make(map[*GethTraceCallResult]string)
+        }
+        for index, call := range trace.Calls {
+            paths[call] = fmt.Sprintf("%s %d", paths[trace], index)
+        }
+
+        logger.Tracef("appending trace %v to tx %d:%x from %v to %v value %v", i, blockNumber, trace.TransactionPosition, trace.From, trace.To, trace.Value)
+        indexedTraces = append(indexedTraces, &Eth1InternalTransactionWithPosition{
+            Eth1InternalTransaction: types.Eth1InternalTransaction{
+                Type:     strings.ToLower(trace.Type),
+                From:     trace.From.Bytes(),
+                To:       trace.To.Bytes(),
+                Value:    common.FromHex(trace.Value),
+                ErrorMsg: trace.Error,
+                Path:     fmt.Sprintf("[%s]", strings.TrimPrefix(paths[trace], " ")),
+            },
+            txPosition: trace.TransactionPosition,
+        })
+    }
+    return indexedTraces, nil
+}
+
+type Eth1InternalTransactionWithPosition struct {
+    types.Eth1InternalTransaction
+    txPosition int
+}
+
+type BlockResponse struct {
+    Hash          string                    `json:"hash"`
+    ParentHash    string                    `json:"parentHash"`
+    UncleHash     string                    `json:"uncleHash"`
+    Coinbase      string                    `json:"coinbase"`
+    Root          string                    `json:"stateRoot"`
+    TxHash        string                    `json:"transactionsHash"`
+    ReceiptHash   string                    `json:"receiptsHash"`
+    Difficulty    string                    `json:"difficulty"`
+    Number        string                    `json:"number"`
+    GasLimit      string                    `json:"gasLimit"`
+    GasUsed       string                    `json:"gasUsed"`
+    Time          string                    `json:"timestamp"`
+    Extra         string                    `json:"extraData"`
+    MixDigest     string                    `json:"mixHash"`
+    Bloom         string                    `json:"logsBloom"`
+    Transactions  []*geth_types.Transaction `json:"transactions"`
+    Withdrawals   []*geth_types.Withdrawal  `json:"withdrawals"`
+    BlobGasUsed   *string                   `json:"blobGasUsed"`
+    ExcessBlobGas *string                   `json:"excessBlobGas"`
+    BaseFee       string                    `json:"baseFee"`
+}
+
+type BlockResponseWithUncles struct {
+    BlockResponse
+    Uncles []*geth_types.Block
+}
+
+type RPCBlock struct {
+    Hash        common.Hash   `json:"hash"`
+    UncleHashes []common.Hash `json:"uncles"`
+}
+
+func (client *ErigonClient) GetBlocksByBatch(blockNumbers []int64) ([]*types.Eth1Block, error) {
+    ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+    defer cancel()
+
+    var ethBlock []*types.Eth1Block
+    var batchCall []geth_rpc.BatchElem
+    batchCallNums := 3
+
+    if len(blockNumbers) == 0 {
+        return nil, fmt.Errorf("block numbers slice is empty")
+    }
+
+    for _, blockNumber := range blockNumbers {
+        batchCall = append(batchCall, geth_rpc.BatchElem{
+            Method: "eth_getBlockByNumber",
+            Args:   []interface{}{blockNumber, true},
+            Result: new(json.RawMessage),
+        })
+
+        batchCall = append(batchCall, geth_rpc.BatchElem{
+            Method: "eth_getBlockReceipts",
+            Args:   []interface{}{blockNumber},
+            Result: new([]geth_types.Receipt),
+        })
+
+        batchCall = append(batchCall, geth_rpc.BatchElem{
+            Method: "trace_block",
+            Args:   []interface{}{blockNumber},
+            Result: new([]ParityTraceResult),
+        })
+    }
+
+    if len(batchCall) == 0 {
+        return ethBlock, nil
+    }
+
+    err := client.rpcClient.BatchCallContext(ctx, batchCall)
+    if err != nil {
+        logger.Errorf("error while batch calling rpc for block details, error: %s", err)
+        return nil, err
+    }
+
+    for i := 0; i < len(batchCall)/batchCallNums; i++ {
+        blockResult := batchCall[i*batchCallNums].Result.(*json.RawMessage)
+        receiptsResult := batchCall[i*batchCallNums+1].Result.(*[]geth_types.Receipt)
+        tracesResults := batchCall[i*batchCallNums+2].Result.(*[]ParityTraceResult)
+
+        var head *geth_types.Header
+        if err := json.Unmarshal(*blockResult, &head); err != nil {
+            return nil, fmt.Errorf("error while unmarshaling block results to Header type, error: %v", err)
+        }
+        var body RPCBlock
+        if err := json.Unmarshal(*blockResult, &body); err != nil {
+            return nil, fmt.Errorf("error while unmarshaling block results to RPCBlock type, error: %v", err)
+        }
+
+        if head.UncleHash == geth_types.EmptyUncleHash && len(body.UncleHashes) > 0 {
+            return nil, fmt.Errorf("server returned non-empty uncle list but block header indicates no uncles")
+        }
+        if head.UncleHash != geth_types.EmptyUncleHash && len(body.UncleHashes) == 0 {
+            return nil, fmt.Errorf("server returned empty uncle list but block header indicates uncles")
+        }
+
+        var uncles []*geth_types.Block
+        if len(body.UncleHashes) > 0 {
+            uncles = make([]*geth_types.Block, len(body.UncleHashes))
+            uncleHashes := make([]geth_rpc.BatchElem, len(body.UncleHashes))
+            for i := range uncleHashes {
+                uncleHashes[i] = geth_rpc.BatchElem{
+                    Method: "eth_getUncleByBlockHashAndIndex",
+                    Args:   []interface{}{body.Hash, hexutil.EncodeUint64(uint64(i))},
+                    Result: &uncles[i],
+                }
+            }
+            if err := client.rpcClient.BatchCallContext(ctx, uncleHashes); err != nil {
+                return nil, fmt.Errorf("error while batch calling uncle hashes, error: %v", err)
+            }
+
+            for i := range uncleHashes {
+                if uncleHashes[i].Error != nil {
+                    return nil, fmt.Errorf("error in uncle hash, error: %v", uncleHashes[i].Error)
+                }
+                if uncles[i] == nil {
+                    return nil, fmt.Errorf("got null header for uncle %d of block %x", i, body.Hash[:])
+                }
+            }
+        }
+
+        var blockResponse BlockResponse
+        err := json.Unmarshal(*blockResult, &blockResponse)
+        if err != nil {
+            logger.Errorf("error while unmarshalling block results to BlockResponse type: %s", err)
+            continue
+        }
+
+        blockResp := BlockResponseWithUncles{
+            BlockResponse: blockResponse,
+            Uncles:        uncles,
+        }
+
+        blockDetails := client.processBlockResult(blockResp)
+        client.processReceiptsAndTraces(blockDetails, *receiptsResult, *tracesResults)
+        ethBlock = append(ethBlock, blockDetails)
+    }
+
+    return ethBlock, nil
+}
+
+func (client *ErigonClient) processBlockResult(block BlockResponseWithUncles) *types.Eth1Block {
+    blockNumber, err := strconv.ParseUint(block.Number, 0, 64)
+    if err != nil {
+        logger.Errorf("error while parsing block number to uint64, error: %s", err)
+    }
+    gasLimit, err := strconv.ParseUint(block.GasLimit, 0, 64)
+    if err != nil {
+        logger.Errorf("error while parsing gas limit, block: %d, error: %s", blockNumber, err)
+    }
+    gasUsed, err := strconv.ParseUint(block.GasUsed, 0, 64)
+    if err != nil {
+        logger.Errorf("error while parsing gas used, block: %d, error: %s", blockNumber, err)
+    }
+    blockTime, err := strconv.ParseInt(block.Time, 0, 64)
+    if err != nil {
+        logger.Errorf("error while parsing block time, block: %d, error: %s", blockNumber, err)
+    }
+
+    var blobGasUsed, excessBlobGas uint64
+    if block.BlobGasUsed != nil {
+        blobGasUsedStr := *block.BlobGasUsed
+        blobGasUsed, err = strconv.ParseUint(blobGasUsedStr[2:], 16, 64) // remove "0x" and parse as hex
+        if err != nil {
+            logger.Errorf("error while parsing blob gas used, block: %d, error: %s", blockNumber, err)
+        }
+    }
+    if block.ExcessBlobGas != nil {
+        excessBlobGasStr := *block.ExcessBlobGas
+        excessBlobGas, err = strconv.ParseUint(excessBlobGasStr[2:], 16, 64)
+        if err != nil {
+            logger.Errorf("error while parsing excess blob gas, block: %d, error: %s", blockNumber, err)
+        }
+    }
+
+    ethBlock := &types.Eth1Block{
+        Hash:          []byte(block.Hash),
+        ParentHash:    []byte(block.ParentHash),
+        UncleHash:     []byte(block.UncleHash),
+        Coinbase:      []byte(block.Coinbase),
+        Root:          []byte(block.Root),
+        TxHash:        []byte(block.TxHash),
+        ReceiptHash:   []byte(block.ReceiptHash),
+        Difficulty:    []byte(block.Difficulty),
+        Number:        blockNumber,
+        GasLimit:      gasLimit,
+        GasUsed:       gasUsed,
+        Time:          timestamppb.New(time.Unix(blockTime, 0)),
+        Extra:         []byte(block.Extra),
+        MixDigest:     []byte(block.MixDigest),
+        Bloom:         []byte(block.Bloom),
+        Uncles:        []*types.Eth1Block{},
+        Transactions:  []*types.Eth1Transaction{},
+        Withdrawals:   []*types.Eth1Withdrawal{},
+        BlobGasUsed:   blobGasUsed,
+        ExcessBlobGas: excessBlobGas,
+        BaseFee:       []byte(block.BaseFee),
+    }
+
+    if len(block.Withdrawals) > 0 {
+        withdrawalsIndexed := make([]*types.Eth1Withdrawal, 0, len(block.Withdrawals))
+        for _, w := range block.Withdrawals {
+            withdrawalsIndexed = append(withdrawalsIndexed, &types.Eth1Withdrawal{
+                Index:          w.Index,
+                ValidatorIndex: w.Validator,
+                Address:        w.Address.Bytes(),
+                Amount:         new(big.Int).SetUint64(w.Amount).Bytes(),
+            })
+        }
+        ethBlock.Withdrawals = withdrawalsIndexed
+    }
+
+    txs := block.Transactions
+
+    for _, tx := range txs {
+
+        var from []byte
+        sender, err := geth_types.Sender(geth_types.NewCancunSigner(tx.ChainId()), tx)
+        if err != nil {
+            from, _ = hex.DecodeString("abababababababababababababababababababab")
+            logrus.Errorf("error converting tx %v to msg: %v", tx.Hash(), err)
+        } else {
+            from = sender.Bytes()
+        }
+
+        pbTx := &types.Eth1Transaction{
+            Type:                 uint32(tx.Type()),
+            Nonce:                tx.Nonce(),
+            GasPrice:             tx.GasPrice().Bytes(),
+            MaxPriorityFeePerGas: tx.GasTipCap().Bytes(),
+            MaxFeePerGas:         tx.GasFeeCap().Bytes(),
+            Gas:                  tx.Gas(),
+            Value:                tx.Value().Bytes(),
+            Data:                 tx.Data(),
+            From:                 from,
+            ChainId:              tx.ChainId().Bytes(),
+            AccessList:           []*types.AccessList{},
+            Hash:                 tx.Hash().Bytes(),
+            Itx:                  []*types.Eth1InternalTransaction{},
+            BlobVersionedHashes:  [][]byte{},
+        }
+
+        if tx.BlobGasFeeCap() != nil {
+            pbTx.MaxFeePerBlobGas = tx.BlobGasFeeCap().Bytes()
+        }
+        for _, h := range tx.BlobHashes() {
+            pbTx.BlobVersionedHashes = append(pbTx.BlobVersionedHashes, h.Bytes())
+        }
+
+        if tx.To() != nil {
+            pbTx.To = tx.To().Bytes()
+        }
+
+        ethBlock.Transactions = append(ethBlock.Transactions, pbTx)
+
+    }
+
+    return ethBlock
+}
+
+func (client *ErigonClient) processReceiptsAndTraces(ethBlock *types.Eth1Block, receipts []geth_types.Receipt, traces []ParityTraceResult) {
+    traceIndex := 0
+    var indexedTraces []*Eth1InternalTransactionWithPosition
+
+    for _, trace := range traces {
+        if trace.Type == "reward" {
+            continue
+        }
+        if trace.TransactionHash == "" {
+            continue
+        }
+        if trace.TransactionPosition >= len(ethBlock.Transactions) {
+            logrus.Errorf("error transaction position %v out of range", trace.TransactionPosition)
+            return
+        }
+
+        from, to, value, traceType := trace.ConvertFields()
+        indexedTraces = append(indexedTraces, &Eth1InternalTransactionWithPosition{
+            Eth1InternalTransaction: types.Eth1InternalTransaction{
+                Type:     traceType,
+                From:     from,
+                To:       to,
+                Value:    value,
+                ErrorMsg: trace.Error,
+                Path:     fmt.Sprint(trace.TraceAddress),
+            },
+            txPosition: trace.TransactionPosition,
+        })
+    }
+
+    for txPosition, receipt := range receipts {
+        ethBlock.Transactions[txPosition].ContractAddress = receipt.ContractAddress[:]
+        ethBlock.Transactions[txPosition].CommulativeGasUsed = receipt.CumulativeGasUsed
+        ethBlock.Transactions[txPosition].GasUsed = receipt.GasUsed
+        ethBlock.Transactions[txPosition].LogsBloom = receipt.Bloom[:]
+        ethBlock.Transactions[txPosition].Logs = make([]*types.Eth1Log, 0, len(receipt.Logs))
+        ethBlock.Transactions[txPosition].Status = receipt.Status
+
+        if receipt.BlobGasPrice != nil {
+            ethBlock.Transactions[txPosition].BlobGasPrice = receipt.BlobGasPrice.Bytes()
+        }
+        ethBlock.Transactions[txPosition].BlobGasUsed = receipt.BlobGasUsed
+
+        for _, l := range receipt.Logs {
+            topics := make([][]byte, 0, len(l.Topics))
+            for _, t := range l.Topics {
+                topics = append(topics, t.Bytes())
+            }
+            ethBlock.Transactions[txPosition].Logs = append(ethBlock.Transactions[txPosition].Logs, &types.Eth1Log{
+                Address: l.Address.Bytes(),
+                Data:    l.Data,
+                Removed: l.Removed,
+                Topics:  topics,
+            })
+        }
+        if len(indexedTraces) == 0 {
+            continue
+        }
+        for ; traceIndex < len(indexedTraces) && indexedTraces[traceIndex].txPosition == txPosition; traceIndex++ {
+            ethBlock.Transactions[txPosition].Itx = append(ethBlock.Transactions[txPosition].Itx, &indexedTraces[traceIndex].Eth1InternalTransaction)
+        }
+    }
+
+}
diff --git a/rpc/erigon_test.go b/rpc/erigon_test.go
new file mode 100644
index 0000000000..ccfb756b9e
--- /dev/null
+++ b/rpc/erigon_test.go
@@ -0,0 +1,75 @@
+package rpc
+
+import (
+    "bytes"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "os"
+    "testing"
+
+    "github.com/sirupsen/logrus"
+)
+
+func TestGnosisBlockHash(t *testing.T) {
+    gnosisURL := os.Getenv("GNOSIS_NODE_URL")
+    if gnosisURL == "" {
+        t.Skip("skipping test, set GNOSIS_NODE_URL")
+    }
+
+    erigonClient, err := NewErigonClient(gnosisURL)
+    if err != nil {
+        logrus.Fatalf("error initializing erigon client: %v", err)
+    }
+
+    tests := []struct {
+        name  string
+        block int64
+    }{
+        {
+            name:  "old block with extra fields",
+            block: 68140,
+        },
+        {
+            name:  "old block with extra fields #2",
+            block: 19187811,
+        },
+        {
+            name:  "without receipts",
+            block: 38039324,
+        },
+        {
+            name:  "newest block",
+            block: 37591835,
+        },
+    }
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            parsed, _, err := erigonClient.GetBlock(tt.block, "parity/geth")
+            if err != nil {
+                t.Fatal(err)
+            }
+
+            type MinimalBlock struct {
+                Result struct {
+                    Hash string `json:"hash"`
+                } `json:"result"`
+            }
+
+            query := fmt.Sprintf(`{"jsonrpc":"2.0","method":"eth_getBlockByNumber","params": ["0x%x",false],"id":1}`, tt.block)
+            resp, err := http.Post(gnosisURL, "application/json", bytes.NewBufferString(query))
+            if err != nil {
+                t.Fatal(err)
+            }
+
+            var res MinimalBlock
+            if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
+                t.Fatal(err)
+            }
+
+            if got, want := fmt.Sprintf("0x%x", parsed.Hash), res.Result.Hash; got != want {
+                t.Errorf("got %v want %v", got, want)
+            }
+        })
+    }
+}
diff --git a/templates/eth1tx.html b/templates/eth1tx.html
index 85040600da..18800f7705 100644
--- a/templates/eth1tx.html
+++ b/templates/eth1tx.html
@@ -255,7 +255,7 @@

{{ if gt (len .InternalTxns) 0 }}
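A minimal, hypothetical caller for the new batch path, sketching how GetBlocksByBatch might be driven. The endpoint and block numbers are placeholders, the import path assumes this repository's module name, and the connected node must expose eth_getBlockByNumber, eth_getBlockReceipts and trace_block; this is an illustration, not code from this diff.

package main

import (
    "log"

    "github.com/gobitfly/eth2-beaconchain-explorer/rpc"
)

func main() {
    // Placeholder endpoint; any Erigon archive node with parity-style tracing should work.
    client, err := rpc.NewErigonClient("http://localhost:8545")
    if err != nil {
        log.Fatalf("error initializing erigon client: %v", err)
    }

    // Each requested block expands into three batched RPC calls; the results
    // are assembled by processBlockResult and processReceiptsAndTraces.
    blocks, err := client.GetBlocksByBatch([]int64{19187811, 19187812})
    if err != nil {
        log.Fatalf("error fetching blocks by batch: %v", err)
    }
    for _, b := range blocks {
        log.Printf("block %d: %d txs, %d withdrawals", b.Number, len(b.Transactions), len(b.Withdrawals))
    }
}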