diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 7e6614089..23196270b 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -4,6 +4,8 @@ # in order to work execute once: # git config blame.ignoreRevsFile .git-blame-ignore-revs +#chore(eslint): add `natural sorting` for `json` files +f3da99c5c685eae1914aad8513d3b4b2f1cdcaa2 # style(eslint): add `typescript delimiter` rule d6b42edb3f687cad3082a9044bcb71fc39a46176 # style(eslint): apply `max-len` rule diff --git a/backend/cmd/blobindexer/main.go b/backend/cmd/blobindexer/main.go index 9f7d37b4a..5be4f6572 100644 --- a/backend/cmd/blobindexer/main.go +++ b/backend/cmd/blobindexer/main.go @@ -16,7 +16,7 @@ import ( func Run() { fs := flag.NewFlagSet("fs", flag.ExitOnError) - configFlag := fs.String("config", "config.yml", "path to config") + configFlag := fs.String("config", "", "path to config") versionFlag := fs.Bool("version", false, "print version and exit") _ = fs.Parse(os.Args[2:]) if *versionFlag { diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index d7b9b77a1..e2fe9e1c7 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -18,6 +18,7 @@ import ( "time" "github.com/coocood/freecache" + "github.com/davecgh/go-spew/spew" "github.com/ethereum/go-ethereum/common" "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/cmd/misc/commands" @@ -32,6 +33,7 @@ import ( edb "github.com/gobitfly/beaconchain/pkg/exporter/db" "github.com/gobitfly/beaconchain/pkg/exporter/modules" "github.com/gobitfly/beaconchain/pkg/exporter/services" + "github.com/gobitfly/beaconchain/pkg/notification" _ "github.com/jackc/pgx/v5/stdlib" "github.com/pkg/errors" utilMath "github.com/protolambda/zrnt/eth2/util/math" @@ -75,7 +77,7 @@ func Run() { } configPath := fs.String("config", "config/default.config.yml", "Path to the config file") - fs.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, 
debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases") + fs.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases, collect-notifications, collect-user-db-notifications") fs.Uint64Var(&opts.StartEpoch, "start-epoch", 0, "start epoch") fs.Uint64Var(&opts.EndEpoch, "end-epoch", 0, "end epoch") fs.Uint64Var(&opts.User, "user", 0, "user id") @@ -181,27 +183,27 @@ func Run() { defer db.FrontendWriterDB.Close() // clickhouse - db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ - Username: cfg.ClickHouse.WriterDatabase.Username, - Password: cfg.ClickHouse.WriterDatabase.Password, - Name: cfg.ClickHouse.WriterDatabase.Name, - Host: cfg.ClickHouse.WriterDatabase.Host, - Port: cfg.ClickHouse.WriterDatabase.Port, - MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, - SSL: true, - MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, - }, &types.DatabaseConfig{ - Username: cfg.ClickHouse.ReaderDatabase.Username, - Password: cfg.ClickHouse.ReaderDatabase.Password, - Name: cfg.ClickHouse.ReaderDatabase.Name, - Host: cfg.ClickHouse.ReaderDatabase.Host, - Port: 
cfg.ClickHouse.ReaderDatabase.Port, - MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, - SSL: true, - MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, - }, "clickhouse", "clickhouse") - defer db.ClickHouseReader.Close() - defer db.ClickHouseWriter.Close() + // db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ + // Username: cfg.ClickHouse.WriterDatabase.Username, + // Password: cfg.ClickHouse.WriterDatabase.Password, + // Name: cfg.ClickHouse.WriterDatabase.Name, + // Host: cfg.ClickHouse.WriterDatabase.Host, + // Port: cfg.ClickHouse.WriterDatabase.Port, + // MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, + // SSL: true, + // MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, + // }, &types.DatabaseConfig{ + // Username: cfg.ClickHouse.ReaderDatabase.Username, + // Password: cfg.ClickHouse.ReaderDatabase.Password, + // Name: cfg.ClickHouse.ReaderDatabase.Name, + // Host: cfg.ClickHouse.ReaderDatabase.Host, + // Port: cfg.ClickHouse.ReaderDatabase.Port, + // MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, + // SSL: true, + // MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, + // }, "clickhouse", "clickhouse") + // defer db.ClickHouseReader.Close() + // defer db.ClickHouseWriter.Close() // Initialize the persistent redis client rdc := redis.NewClient(&redis.Options{ @@ -216,6 +218,14 @@ func Run() { db.PersistentRedisDbClient = rdc defer db.PersistentRedisDbClient.Close() + if utils.Config.TieredCacheProvider != "redis" { + log.Fatal(nil, "no cache provider set, please set TieredCacheProvider (redis)", 0) + } + if utils.Config.TieredCacheProvider == "redis" || len(utils.Config.RedisCacheEndpoint) != 0 { + cache.MustInitTieredCache(utils.Config.RedisCacheEndpoint) + log.Infof("tiered Cache initialized, latest finalized epoch: %v", cache.LatestFinalizedEpoch.Get()) + } + + switch opts.Command { case "nameValidatorsByRanges": err := nameValidatorsByRanges(opts.ValidatorNameRanges) @@
-456,6 +466,10 @@ func Run() { err = fixEns(erigonClient) case "fix-ens-addresses": err = fixEnsAddresses(erigonClient) + case "collect-notifications": + err = collectNotifications(opts.StartEpoch) + case "collect-user-db-notifications": + err = collectUserDbNotifications(opts.StartEpoch) default: log.Fatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -467,6 +481,35 @@ func Run() { } } +func collectNotifications(startEpoch uint64) error { + epoch := startEpoch + + log.Infof("collecting notifications for epoch %v", epoch) + notifications, err := notification.GetNotificationsForEpoch(utils.Config.Notifications.PubkeyCachePath, epoch) + if err != nil { + return err + } + + log.Infof("found %v notifications for epoch %v with %v notifications for user 0", len(notifications), epoch, len(notifications[0])) + if len(notifications[0]) > 0 { + spew.Dump(notifications[0]) + } + return nil +} + +func collectUserDbNotifications(startEpoch uint64) error { + epoch := startEpoch + + log.Infof("collecting notifications for epoch %v", epoch) + notifications, err := notification.GetUserNotificationsForEpoch(utils.Config.Notifications.PubkeyCachePath, epoch) + if err != nil { + return err + } + + log.Infof("found %v notifications for epoch %v", len(notifications), epoch) + return nil +} + func fixEns(erigonClient *rpc.ErigonClient) error { log.Infof("command: fix-ens") addrs := []struct { diff --git a/backend/cmd/monitoring/main.go b/backend/cmd/monitoring/main.go index a4bef3a60..eef141caf 100644 --- a/backend/cmd/monitoring/main.go +++ b/backend/cmd/monitoring/main.go @@ -6,6 +6,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/commons/version" @@ -31,6 +32,15 @@ func Run() { } utils.Config = cfg + if 
utils.Config.Metrics.Enabled { + go func() { + log.Infof("serving metrics on %v", utils.Config.Metrics.Address) + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { + log.Fatal(err, "error serving metrics", 0) + } + }() + } + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ Username: cfg.ClickHouse.WriterDatabase.Username, Password: cfg.ClickHouse.WriterDatabase.Password, diff --git a/backend/cmd/typescript_converter/main.go b/backend/cmd/typescript_converter/main.go index da6fa5be9..8c4e81433 100644 --- a/backend/cmd/typescript_converter/main.go +++ b/backend/cmd/typescript_converter/main.go @@ -3,6 +3,7 @@ package typescript_converter import ( "flag" "go/ast" + "iter" "os" "path/filepath" "slices" @@ -18,6 +19,8 @@ const ( fallbackType = "any" commonFileName = "common" lintDisable = "/* eslint-disable */\n" + goFileSuffix = ".go" + tsFileSuffix = ".ts" ) // Files that should not be converted to TypeScript @@ -65,24 +68,23 @@ func Run() { log.Fatal(nil, "Failed to load package", 0) } - // Find all common types + // Find all common types, i.e. 
types that are used in multiple files and must be imported in ts commonTypes := getCommonTypes(pkgs) - // Find all usages of common types - usage := getCommonUsages(pkgs, commonTypes) - - // Generate Tygo for common.go - tygos := []*tygo.Tygo{tygo.New(getTygoConfig(out, commonFileName, ""))} - // Generate Tygo for each file - for file, typesUsed := range usage { - importStr := "" + // Find imports (usages of common types) for each file + imports := getImports(pkgs, commonTypes) + + var configs []*tygo.Tygo + // Generate Tygo config for each file + for fileName, typesUsed := range imports { + var importStr string if len(typesUsed) > 0 { importStr = "import type { " + strings.Join(typesUsed, ", ") + " } from './" + commonFileName + "'\n" } - tygos = append(tygos, tygo.New(getTygoConfig(out, file, importStr))) + configs = append(configs, tygo.New(getTygoConfig(out, fileName, importStr))) } // Generate TypeScript - for _, tygo := range tygos { + for _, tygo := range configs { err := tygo.Generate() if err != nil { log.Fatal(err, "Failed to generate TypeScript", 0) @@ -93,7 +95,7 @@ func Run() { } func deleteFiles(out string) error { - files, err := filepath.Glob(out + "*.ts") + files, err := filepath.Glob(out + "*" + tsFileSuffix) if err != nil { return err } @@ -106,68 +108,82 @@ func deleteFiles(out string) error { return nil } -func getTygoConfig(out, file, frontmatter string) *tygo.Config { +func getTygoConfig(outDir, fileName, frontmatter string) *tygo.Config { return &tygo.Config{ Packages: []*tygo.PackageConfig{ { Path: packagePath, TypeMappings: typeMappings, FallbackType: fallbackType, - IncludeFiles: []string{file + ".go"}, - OutputPath: out + file + ".ts", + IncludeFiles: []string{fileName + goFileSuffix}, + OutputPath: outDir + fileName + tsFileSuffix, Frontmatter: lintDisable + frontmatter, }, }, } } -// Parse common.go to find all common types -func getCommonTypes(pkgs []*packages.Package) map[string]bool { - commonTypes := make(map[string]bool) - for _, 
pkg := range pkgs { - for _, file := range pkg.Syntax { - filename := strings.TrimSuffix(filepath.Base(pkg.Fset.File(file.Pos()).Name()), ".go") - if filepath.Base(filename) != commonFileName { - continue - } - ast.Inspect(file, func(n ast.Node) bool { - if typeSpec, ok := n.(*ast.TypeSpec); ok { - commonTypes[typeSpec.Name.Name] = true +// Iterate over all file names and files in the packages +func allFiles(pkgs []*packages.Package) iter.Seq2[string, *ast.File] { + return func(yield func(string, *ast.File) bool) { + for _, pkg := range pkgs { + for _, file := range pkg.Syntax { + fileName := filepath.Base(pkg.Fset.File(file.Pos()).Name()) + if !yield(fileName, file) { + return } - return true - }) - return commonTypes + } } } - return nil +} + +// Parse common.go to find all common types +func getCommonTypes(pkgs []*packages.Package) map[string]struct{} { + var commonFile *ast.File + // find common file + for fileName, file := range allFiles(pkgs) { + fileName = strings.TrimSuffix(fileName, goFileSuffix) + if filepath.Base(fileName) == commonFileName { + commonFile = file + break + } + } + if commonFile == nil { + log.Fatal(nil, "common.go not found", 0) + } + commonTypes := make(map[string]struct{}) + // iterate over all types in common file and add them to the map + for node := range ast.Preorder(commonFile) { + if typeSpec, ok := node.(*ast.TypeSpec); ok { + commonTypes[typeSpec.Name.Name] = struct{}{} + } + } + return commonTypes } // Parse all files to find used common types for each file -func getCommonUsages(pkgs []*packages.Package, commonTypes map[string]bool) map[string][]string { - usage := make(map[string][]string) // Map from file to list of commonTypes used - for _, pkg := range pkgs { - for _, file := range pkg.Syntax { - filename := strings.TrimSuffix(filepath.Base(pkg.Fset.File(file.Pos()).Name()), ".go") - if filepath.Base(filename) == commonFileName || slices.Contains(ignoredFiles, filename) { +// Returns a map with file name as key and a set of 
common types used in the file as value +func getImports(pkgs []*packages.Package, commonTypes map[string]struct{}) map[string][]string { + imports := make(map[string][]string) // Map from file to set of commonTypes used + imports[commonFileName] = []string{} // Add common file to map with empty set + for fileName, file := range allFiles(pkgs) { + fileName = strings.TrimSuffix(fileName, goFileSuffix) + if filepath.Base(fileName) == commonFileName || slices.Contains(ignoredFiles, fileName) { + continue + } + var currentFileImports []string + // iterate over all struct fields in the file + for node := range ast.Preorder(file) { + ident, ok := node.(*ast.Ident) + if !ok { continue } - if _, exists := usage[filename]; !exists { - usage[filename] = make([]string, 0) + _, isCommonType := commonTypes[ident.Name] + if isCommonType && !slices.Contains(currentFileImports, ident.Name) { + currentFileImports = append(currentFileImports, ident.Name) } - ast.Inspect(file, func(n ast.Node) bool { - ident, ok := n.(*ast.Ident) - if !ok { - return true - } - if !commonTypes[ident.Name] { - return true - } - if !slices.Contains(usage[filename], ident.Name) { - usage[filename] = append(usage[filename], ident.Name) - } - return true - }) } + imports[fileName] = currentFileImports } - return usage + return imports } diff --git a/backend/cmd/user_service/main.go b/backend/cmd/user_service/main.go index 79ba47060..76e634dda 100644 --- a/backend/cmd/user_service/main.go +++ b/backend/cmd/user_service/main.go @@ -93,6 +93,30 @@ func Run() { }, "pgx", "postgres") }() + wg.Add(1) + go func() { + defer wg.Done() + db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ + Username: utils.Config.WriterDatabase.Username, + Password: utils.Config.WriterDatabase.Password, + Name: utils.Config.WriterDatabase.Name, + Host: utils.Config.WriterDatabase.Host, + Port: utils.Config.WriterDatabase.Port, + MaxOpenConns: utils.Config.WriterDatabase.MaxOpenConns, + MaxIdleConns: 
utils.Config.WriterDatabase.MaxIdleConns, + SSL: utils.Config.WriterDatabase.SSL, + }, &types.DatabaseConfig{ + Username: utils.Config.ReaderDatabase.Username, + Password: utils.Config.ReaderDatabase.Password, + Name: utils.Config.ReaderDatabase.Name, + Host: utils.Config.ReaderDatabase.Host, + Port: utils.Config.ReaderDatabase.Port, + MaxOpenConns: utils.Config.ReaderDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.ReaderDatabase.MaxIdleConns, + SSL: utils.Config.ReaderDatabase.SSL, + }, "pgx", "postgres") + }() + // if needed, init the database, cache or bigtable wg.Wait() diff --git a/backend/go.mod b/backend/go.mod index 59fc22b83..934129130 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -6,6 +6,7 @@ require ( cloud.google.com/go/bigtable v1.21.0 cloud.google.com/go/secretmanager v1.11.5 firebase.google.com/go v3.13.0+incompatible + firebase.google.com/go/v4 v4.14.1 github.com/ClickHouse/clickhouse-go/v2 v2.17.1 github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 github.com/alexedwards/scs/redisstore v0.0.0-20240316134038-7e11d57e8885 @@ -13,6 +14,7 @@ require ( github.com/attestantio/go-eth2-client v0.19.10 github.com/awa/go-iap v1.26.5 github.com/aws/aws-sdk-go-v2 v1.25.0 + github.com/aws/aws-sdk-go-v2/config v1.18.45 github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 github.com/bwmarrin/snowflake v0.3.0 @@ -30,7 +32,7 @@ require ( github.com/gobitfly/eth.store v0.0.0-20240312111708-b43f13990280 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/gomodule/redigo v1.9.2 github.com/google/uuid v1.6.0 github.com/gorilla/csrf v1.7.2 @@ -70,26 +72,27 @@ require ( github.com/wealdtech/go-eth2-types/v2 v2.8.2 github.com/wealdtech/go-eth2-util v1.8.0 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/crypto v0.19.0 + golang.org/x/crypto v0.21.0 golang.org/x/exp 
v0.0.0-20240213143201-ec583247a57a golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.18.0 - google.golang.org/api v0.164.0 - google.golang.org/protobuf v1.32.0 + google.golang.org/api v0.170.0 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/firestore v1.14.0 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - cloud.google.com/go/storage v1.36.0 // indirect + cloud.google.com/go/firestore v1.15.0 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect + cloud.google.com/go/storage v1.40.0 // indirect github.com/ClickHouse/ch-go v0.58.2 // indirect + github.com/MicahParks/keyfunc v1.9.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 // indirect github.com/ajg/form v1.5.1 // indirect @@ -97,13 +100,18 @@ require ( github.com/alessio/shellescape v1.4.1 // indirect github.com/andybalholm/brotli v1.0.6 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect 
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 // indirect github.com/aws/smithy-go v1.20.0 // indirect github.com/bahlo/generic-list-go v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -153,9 +161,10 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/herumi/bls-eth-go-binary v1.31.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huandu/go-clone v1.6.0 // indirect @@ -246,24 +255,25 @@ require ( github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // 
indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/grpc v1.62.0 // indirect + google.golang.org/appengine/v2 v2.0.2 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/grpc v1.62.1 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/backend/go.sum b/backend/go.sum index 34c1e36af..cc784e35b 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,26 +1,28 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/bigtable v1.21.0 h1:BFN4jhkA9ULYYV2Ug7AeOtetVLnN2jKuIq5TcRc5C38= cloud.google.com/go/bigtable v1.21.0/go.mod h1:V0sYNRtk0dgAKjyRr/MyBpHpSXqh+9P39euf820EZ74= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= 
-cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8= +cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage 
v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +firebase.google.com/go/v4 v4.14.1 h1:4qiUETaFRWoFGE1XP5VbcEdtPX93Qs+8B/7KvP2825g= +firebase.google.com/go/v4 v4.14.1/go.mod h1:fgk2XshgNDEKaioKco+AouiegSI9oTWVqRaBdTTGBoM= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -37,6 +39,8 @@ github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 h1:HcdvlzaQ4CJfH7xbfJZ3ZHN//BTEpId46iKEMuP3wHE= github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21/go.mod h1:7PODFS++oNZ6khojmPBvkrDeFO/hrc3jmvWvQAOXorw= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/MicahParks/keyfunc v1.9.0 h1:lhKd5xrFHLNOWrDc4Tyb/Q1AJ4LCzQ48GVJyVIID3+o= +github.com/MicahParks/keyfunc v1.9.0/go.mod h1:IdnCilugA0O/99dW+/MkvlyrsX8+L8+x95xuVNtM5jw= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -69,8 +73,11 @@ github.com/aws/aws-sdk-go-v2 v1.25.0 h1:sv7+1JVJxOu/dD/sz/csHX7jFqmP001TIY7aytBW github.com/aws/aws-sdk-go-v2 v1.25.0/go.mod 
h1:G104G1Aho5WqF+SR3mDIobTABQzpYV0WxMsKxlMggOA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 h1:2UO6/nT1lCZq1LqM67Oa4tdgP1CvL1sLSxvuD+VrOeE= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0/go.mod h1:5zGj2eA85ClyedTDK+Whsu+w9yimnVIZvhvBKrDquM8= +github.com/aws/aws-sdk-go-v2/config v1.18.45 h1:Aka9bI7n8ysuwPeFdm77nfbyHCAKQ3z9ghB3S/38zes= +github.com/aws/aws-sdk-go-v2/config v1.18.45/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= github.com/aws/aws-sdk-go-v2/credentials v1.13.43 h1:LU8vo40zBlo3R7bAvBVy/ku4nxGEyZe9N8MqAeFTzF8= github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13 h1:PIktER+hwIG286DqXyvVENjgLTAwGgoeriLDD5C+YlQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw= @@ -78,6 +85,8 @@ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0/go.mod h1:D+duLy2ylga github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0/go.mod h1:hL6BWM/d/qz113fVitZjbXR0E+RCTU1+x+1Idyn5NgE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45 h1:hze8YsjSh8Wl1rYa1CJpRmXP21BvOBuc76YhW0HsuQ4= +github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 h1:TkbRExyKSVHELwG9gz2+gql37jjec2R5vus9faTomwE= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0/go.mod h1:T3/9xMKudHhnj8it5EqIrhvv11tVZqWYkKcot+BFStc= 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8= @@ -91,8 +100,11 @@ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 h1:l5puwOHr7IxECu github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0/go.mod h1:Oov79flWa/n7Ni+lQC3z+VM7PoRM47omRqbJU9B5Y7E= github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0 h1:VfU15izXQjz4m9y1DkbY79iylIiuPwWtrram4cSpWEI= github.com/aws/aws-sdk-go-v2/service/s3 v1.49.0/go.mod h1:1o/W6JFUuREj2ExoQ21vHJgO7wakvjhol91M9eknFgs= +github.com/aws/aws-sdk-go-v2/service/sso v1.15.2 h1:JuPGc7IkOP4AaqcZSIcyqLpFSqBWK32rM9+a1g6u73k= github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3 h1:HFiiRkf1SdaAmV3/BHOFZ9DjFynPHj8G/UIO1lQS+fk= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= +github.com/aws/aws-sdk-go-v2/service/sts v1.23.2 h1:0BkLfgeDjfZnZ+MhB3ONb01u9pwFYTCZVhlsSSBvlbU= github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.20.0 h1:6+kZsCXZwKxZS9RfISnPc4EXlHoyAkm2hPuM8X2BrrQ= @@ -331,6 +343,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil 
v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -342,6 +355,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -353,8 +367,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -398,8 +412,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -427,6 +441,8 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w= github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8= @@ -988,18 +1004,18 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.mongodb.org/mongo-driver 
v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel 
v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1041,8 +1057,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod 
h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -1079,12 +1095,13 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1136,13 +1153,13 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1184,28 +1201,30 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.164.0 h1:of5G3oE2WRMVb2yoWKME4ZP8y8zpUKC6bMhxDr8ifyk= -google.golang.org/api v0.164.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= 
+google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/appengine/v2 v2.0.2 h1:MSqyWy2shDLwG7chbwBJ5uMyw6SNqJzhJHNDwYB0Akk= +google.golang.org/appengine/v2 v2.0.2/go.mod h1:PkgRUWz4o1XOvbqtWTkBtCitEJ5Tp4HoVEdMMYQR/8E= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api 
v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1218,8 +1237,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf 
v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/backend/pkg/api/data_access/data_access.go b/backend/pkg/api/data_access/data_access.go index 3fc31d105..047d471a6 100644 --- a/backend/pkg/api/data_access/data_access.go +++ b/backend/pkg/api/data_access/data_access.go @@ -30,6 +30,7 @@ type DataAccessor interface { ProtocolRepository RatelimitRepository HealthzRepository + MachineRepository StartDataAccessServices() Close() diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 7bee9a38c..2d667417b 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -6,11 +6,13 @@ import ( "fmt" "math/rand/v2" "reflect" + "slices" "time" "github.com/go-faker/faker/v4" "github.com/go-faker/faker/v4/pkg/options" "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/types" t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/userservice" "github.com/shopspring/decimal" @@ -253,10 +255,6 @@ func (d *DummyService) GetValidatorDashboardGroupExists(ctx context.Context, das return true, nil } -func (d *DummyService) GetValidatorDashboardExistingValidatorCount(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) { - return getDummyData[uint64]() -} - func (d *DummyService) AddValidatorDashboardValidators(ctx context.Context, dashboardId 
t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { return getDummyData[[]t.VDBPostValidatorsData]() } @@ -398,7 +396,20 @@ func (d *DummyService) GetValidatorDashboardRocketPoolMinipools(ctx context.Cont } func (d *DummyService) GetAllNetworks() ([]t.NetworkInfo, error) { - return getDummyData[[]t.NetworkInfo]() + return []types.NetworkInfo{ + { + ChainId: 1, + Name: "ethereum", + }, + { + ChainId: 100, + Name: "gnosis", + }, + { + ChainId: 17000, + Name: "holesky", + }, + }, nil } func (d *DummyService) GetSearchValidatorByIndex(ctx context.Context, chainId, index uint64) (*t.SearchValidator, error) { @@ -448,7 +459,7 @@ func (d *DummyService) GetValidatorDashboardPublicIdCount(ctx context.Context, d func (d *DummyService) GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) { return getDummyStruct[t.NotificationOverviewData]() } -func (d *DummyService) GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { +func (d *DummyService) GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { return getDummyWithPaging[t.NotificationDashboardsTableRow]() } @@ -469,7 +480,7 @@ func (d *DummyService) GetClientNotifications(ctx context.Context, userId uint64 func (d *DummyService) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) { return getDummyWithPaging[t.NotificationRocketPoolTableRow]() } -func (d *DummyService) GetNetworkNotifications(ctx context.Context, userId uint64, 
cursor string, colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { +func (d *DummyService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { return getDummyWithPaging[t.NotificationNetworksTableRow]() } @@ -488,6 +499,11 @@ func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Contex func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { return nil } + +func (d *DummyService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + return getDummyStruct[t.NotificationSettingsClient]() +} + func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { r, p, err := getDummyWithPaging[t.NotificationSettingsDashboardsTableRow]() for i, n := range r { @@ -656,3 +672,20 @@ func (d *DummyService) IncrementBundleDeliveryCount(ctx context.Context, bundleV func (d *DummyService) GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) { return getDummyStruct[t.MobileWidgetData]() } + +func (d *DummyService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit uint64, offset uint64) (*types.MachineMetricsData, error) { + data, err := getDummyStruct[types.MachineMetricsData]() + if err != nil { + return nil, err + } + data.SystemMetrics = slices.SortedFunc(slices.Values(data.SystemMetrics), func(i, j *t.MachineMetricSystem) int { + return int(i.Timestamp) - int(j.Timestamp) + }) + data.ValidatorMetrics = 
slices.SortedFunc(slices.Values(data.ValidatorMetrics), func(i, j *t.MachineMetricValidator) int { + return int(i.Timestamp) - int(j.Timestamp) + }) + data.NodeMetrics = slices.SortedFunc(slices.Values(data.NodeMetrics), func(i, j *t.MachineMetricNode) int { + return int(i.Timestamp) - int(j.Timestamp) + }) + return data, nil +} diff --git a/backend/pkg/api/data_access/machine_metrics.go b/backend/pkg/api/data_access/machine_metrics.go new file mode 100644 index 000000000..3b9935fcf --- /dev/null +++ b/backend/pkg/api/data_access/machine_metrics.go @@ -0,0 +1,15 @@ +package dataaccess + +import ( + "context" + + "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type MachineRepository interface { + GetUserMachineMetrics(context context.Context, userID uint64, limit uint64, offset uint64) (*types.MachineMetricsData, error) +} + +func (d *DataAccessService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit uint64, offset uint64) (*types.MachineMetricsData, error) { + return d.dummy.GetUserMachineMetrics(ctx, userID, limit, offset) +} diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 5389847db..813caca74 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -10,7 +10,7 @@ import ( type NotificationsRepository interface { GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) - GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) + GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) // depending on how notifications are implemented, we may need 
to use something other than `notificationId` for identifying the notification GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64) (*t.NotificationValidatorDashboardDetail, error) GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64) (*t.NotificationAccountDashboardDetail, error) @@ -18,13 +18,14 @@ type NotificationsRepository interface { GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) GetClientNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationClientsColumn], search string, limit uint64) ([]t.NotificationClientsTableRow, *t.Paging, error) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) - GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) + GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error 
DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error + UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error UpdateNotificationSettingsAccountDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error @@ -33,8 +34,8 @@ type NotificationsRepository interface { func (d *DataAccessService) GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) { return d.dummy.GetNotificationOverview(ctx, userId) } -func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { - return d.dummy.GetDashboardNotifications(ctx, userId, chainId, cursor, colSort, search, limit) +func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userId uint64, chainIds []uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { + return d.dummy.GetDashboardNotifications(ctx, userId, chainIds, cursor, colSort, search, limit) } func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64) (*t.NotificationValidatorDashboardDetail, error) { @@ -54,8 +55,8 @@ func 
(d *DataAccessService) GetClientNotifications(ctx context.Context, userId u func (d *DataAccessService) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) { return d.dummy.GetRocketPoolNotifications(ctx, userId, cursor, colSort, search, limit) } -func (d *DataAccessService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { - return d.dummy.GetNetworkNotifications(ctx, userId, cursor, colSort, search, limit) +func (d *DataAccessService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationNetworksColumn], limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { + return d.dummy.GetNetworkNotifications(ctx, userId, cursor, colSort, limit) } func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) { return d.dummy.GetNotificationSettings(ctx, userId) @@ -72,6 +73,9 @@ func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.C func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { return d.dummy.DeleteNotificationSettingsPairedDevice(ctx, userId, pairedDeviceId) } +func (d *DataAccessService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + return d.dummy.UpdateNotificationSettingsClients(ctx, userId, clientId, IsSubscribed) +} func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) 
([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { return d.dummy.GetNotificationSettingsDashboards(ctx, userId, cursor, colSort, search, limit) } diff --git a/backend/pkg/api/data_access/vdb.go b/backend/pkg/api/data_access/vdb.go index e7498702f..da907a294 100644 --- a/backend/pkg/api/data_access/vdb.go +++ b/backend/pkg/api/data_access/vdb.go @@ -28,7 +28,6 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardGroupCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) GetValidatorDashboardGroupExists(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64) (bool, error) - GetValidatorDashboardExistingValidatorCount(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) diff --git a/backend/pkg/api/data_access/vdb_deposits.go b/backend/pkg/api/data_access/vdb_deposits.go index db2026a23..0f81e1d93 100644 --- a/backend/pkg/api/data_access/vdb_deposits.go +++ b/backend/pkg/api/data_access/vdb_deposits.go @@ -183,6 +183,7 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, for i := range data { responseData[i].From = *addressMapping[string(responseData[i].From.Hash)] responseData[i].From.IsContract = fromContractStatuses[i] == types.CONTRACT_CREATION || fromContractStatuses[i] == types.CONTRACT_PRESENT + responseData[i].Depositor = *addressMapping[string(responseData[i].Depositor.Hash)] responseData[i].Depositor.IsContract = 
responseData[i].From.IsContract if responseData[i].Depositor.Hash != responseData[i].From.Hash { responseData[i].Depositor.IsContract = depositorContractStatuses[depositorIdx] == types.CONTRACT_CREATION || depositorContractStatuses[depositorIdx] == types.CONTRACT_PRESENT diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index d415f0a23..b82bbff30 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "fmt" "math/big" + "slices" "sort" "strconv" "strings" @@ -15,7 +16,6 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" - "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/utils" constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" "github.com/lib/pq" @@ -790,21 +790,6 @@ func (d *DataAccessService) GetValidatorDashboardGroupExists(ctx context.Context return groupExists, err } -// return how many of the passed validators are already in the dashboard -func (d *DataAccessService) GetValidatorDashboardExistingValidatorCount(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) { - if len(validators) == 0 { - return 0, nil - } - - var count uint64 - err := d.alloyReader.GetContext(ctx, &count, ` - SELECT COUNT(*) - FROM users_val_dashboards_validators - WHERE dashboard_id = $1 AND validator_index = ANY($2) - `, dashboardId, pq.Array(validators)) - return count, err -} - func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { if len(validators) == 0 { // No validators to add @@ -889,191 +874,145 @@ func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, return result, 
nil } +// Updates the group for validators already in the dashboard linked to the deposit address. +// Adds up to limit new validators associated with the deposit address, if not already in the dashboard. func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - // for all validators already in the dashboard that are associated with the deposit address, update the group - // then add no more than `limit` validators associated with the deposit address to the dashboard addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x")) if err != nil { return nil, err } - if len(addressParsed) != 20 { - return nil, fmt.Errorf("invalid deposit address: %s", address) - } - var validatorIndicesToAdd []uint64 - err = d.readerDb.SelectContext(ctx, &validatorIndicesToAdd, "SELECT validatorindex FROM validators WHERE pubkey IN (SELECT publickey FROM eth1_deposits WHERE from_address = $1) ORDER BY validatorindex LIMIT $2;", addressParsed, limit) - if err != nil { - return nil, err - } + g, gCtx := errgroup.WithContext(ctx) - // retrieve the existing validators - var existingValidators []uint64 - err = d.alloyWriter.SelectContext(ctx, &existingValidators, "SELECT validator_index FROM users_val_dashboards_validators WHERE dashboard_id = $1", dashboardId) - if err != nil { - return nil, err - } - existingValidatorsMap := make(map[uint64]bool, len(existingValidators)) - for _, validatorIndex := range existingValidators { - existingValidatorsMap[validatorIndex] = true - } - - // filter out the validators that are already in the dashboard + // fetch validators that are already in the dashboard and associated with the deposit address var validatorIndicesToUpdate []uint64 + + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, ` + SELECT DISTINCT uvdv.validator_index + FROM validators v + JOIN 
eth1_deposits d ON v.pubkey = d.publickey + JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index + WHERE uvdv.dashboard_id = $1 AND d.from_address = $2; + `, dashboardId, addressParsed) + }) + + // fetch validators that are not yet in the dashboard and associated with the deposit address, up to the limit var validatorIndicesToInsert []uint64 - for _, validatorIndex := range validatorIndicesToAdd { - if _, ok := existingValidatorsMap[validatorIndex]; ok { - validatorIndicesToUpdate = append(validatorIndicesToUpdate, validatorIndex) - } else { - validatorIndicesToInsert = append(validatorIndicesToInsert, validatorIndex) - } - } + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, ` + SELECT DISTINCT v.validatorindex + FROM validators v + JOIN eth1_deposits d ON v.pubkey = d.publickey + LEFT JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1 + WHERE d.from_address = $2 AND uvdv.validator_index IS NULL + ORDER BY v.validatorindex + LIMIT $3; + `, dashboardId, addressParsed, limit) + }) - // update the group for all existing validators - validatorIndices := make([]uint64, 0, int(limit)) - validatorIndices = append(validatorIndices, validatorIndicesToUpdate...) - - // insert the new validators up to the allowed user max limit taking into account how many validators are already in the dashboard - if len(validatorIndicesToInsert) > 0 { - freeSpace := int(limit) - len(existingValidators) - if freeSpace > 0 { - if len(validatorIndicesToInsert) > freeSpace { // cap inserts to the amount of free space available - log.Infof("limiting the number of validators to insert to %d", freeSpace) - validatorIndicesToInsert = validatorIndicesToInsert[:freeSpace] - } - validatorIndices = append(validatorIndices, validatorIndicesToInsert...) 
- } + err = g.Wait() + if err != nil { + return nil, err } - if len(validatorIndices) == 0 { - // no validators to add - return []t.VDBPostValidatorsData{}, nil - } - log.Infof("inserting %d new validators and updating %d validators of dashboard %d, limit is %d", len(validatorIndicesToInsert), len(validatorIndicesToUpdate), dashboardId, limit) + validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert) + return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices) } +// Updates the group for validators already in the dashboard linked to the withdrawal address. +// Adds up to limit new validators associated with the withdrawal address, if not already in the dashboard. func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - // for all validators already in the dashboard that are associated with the withdrawal address, update the group - // then add no more than `limit` validators associated with the deposit address to the dashboard addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x")) if err != nil { return nil, err } - var validatorIndicesToAdd []uint64 - err = d.readerDb.SelectContext(ctx, &validatorIndicesToAdd, "SELECT validatorindex FROM validators WHERE withdrawalcredentials = $1 ORDER BY validatorindex LIMIT $2;", addressParsed, limit) - if err != nil { - return nil, err - } - // retrieve the existing validators - var existingValidators []uint64 - err = d.alloyWriter.SelectContext(ctx, &existingValidators, "SELECT validator_index FROM users_val_dashboards_validators WHERE dashboard_id = $1", dashboardId) - if err != nil { - return nil, err - } - existingValidatorsMap := make(map[uint64]bool, len(existingValidators)) - for _, validatorIndex := range existingValidators { - existingValidatorsMap[validatorIndex] = true - } + g, gCtx := 
errgroup.WithContext(ctx) - // filter out the validators that are already in the dashboard + // fetch validators that are already in the dashboard and associated with the withdrawal address var validatorIndicesToUpdate []uint64 + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, ` + SELECT DISTINCT uvdv.validator_index + FROM validators v + JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index + WHERE uvdv.dashboard_id = $1 AND v.withdrawalcredentials = $2; + `, dashboardId, addressParsed) + }) + + // fetch validators that are not yet in the dashboard and associated with the withdrawal address, up to the limit var validatorIndicesToInsert []uint64 - for _, validatorIndex := range validatorIndicesToAdd { - if _, ok := existingValidatorsMap[validatorIndex]; ok { - validatorIndicesToUpdate = append(validatorIndicesToUpdate, validatorIndex) - } else { - validatorIndicesToInsert = append(validatorIndicesToInsert, validatorIndex) - } - } + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, ` + SELECT DISTINCT v.validatorindex + FROM validators v + LEFT JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1 + WHERE v.withdrawalcredentials = $2 AND uvdv.validator_index IS NULL + ORDER BY v.validatorindex + LIMIT $3; + `, dashboardId, addressParsed, limit) + }) - // update the group for all existing validators - validatorIndices := make([]uint64, 0, int(limit)) - validatorIndices = append(validatorIndices, validatorIndicesToUpdate...) 
- - // insert the new validators up to the allowed user max limit taking into account how many validators are already in the dashboard - if len(validatorIndicesToInsert) > 0 { - freeSpace := int(limit) - len(existingValidators) - if freeSpace > 0 { - if len(validatorIndicesToInsert) > freeSpace { // cap inserts to the amount of free space available - log.Infof("limiting the number of validators to insert to %d", freeSpace) - validatorIndicesToInsert = validatorIndicesToInsert[:freeSpace] - } - validatorIndices = append(validatorIndices, validatorIndicesToInsert...) - } + err = g.Wait() + if err != nil { + return nil, err } - if len(validatorIndices) == 0 { - // no validators to add - return []t.VDBPostValidatorsData{}, nil - } - log.Infof("inserting %d new validators and updating %d validators of dashboard %d, limit is %d", len(validatorIndicesToInsert), len(validatorIndicesToUpdate), dashboardId, limit) + validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert) + return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices) } +// Update the group for validators already in the dashboard linked to the graffiti (via produced block). +// Add up to limit new validators associated with the graffiti, if not already in the dashboard. 
func (d *DataAccessService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) { - // for all validators already in the dashboard that are associated with the graffiti (by produced block), update the group - // then add no more than `limit` validators associated with the deposit address to the dashboard - var validatorIndicesToAdd []uint64 - err := d.readerDb.SelectContext(ctx, &validatorIndicesToAdd, "SELECT DISTINCT proposer FROM blocks WHERE graffiti_text = $1 ORDER BY proposer LIMIT $2;", graffiti, limit) - if err != nil { - return nil, err - } + g, gCtx := errgroup.WithContext(ctx) - // retrieve the existing validators - var existingValidators []uint64 - err = d.alloyWriter.SelectContext(ctx, &existingValidators, "SELECT validator_index FROM users_val_dashboards_validators WHERE dashboard_id = $1", dashboardId) - if err != nil { - return nil, err - } - existingValidatorsMap := make(map[uint64]bool, len(existingValidators)) - for _, validatorIndex := range existingValidators { - existingValidatorsMap[validatorIndex] = true - } - - // filter out the validators that are already in the dashboard + // fetch validators that are already in the dashboard and associated with the graffiti var validatorIndicesToUpdate []uint64 + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, ` + SELECT DISTINCT uvdv.validator_index + FROM blocks b + JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index + WHERE uvdv.dashboard_id = $1 AND b.graffiti_text = $2; + `, dashboardId, graffiti) + }) + + // fetch validators that are not yet in the dashboard and associated with the graffiti, up to the limit var validatorIndicesToInsert []uint64 - for _, validatorIndex := range validatorIndicesToAdd { - if _, ok := existingValidatorsMap[validatorIndex]; ok { - validatorIndicesToUpdate = 
append(validatorIndicesToUpdate, validatorIndex) - } else { - validatorIndicesToInsert = append(validatorIndicesToInsert, validatorIndex) - } - } + g.Go(func() error { + return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, ` + SELECT DISTINCT b.proposer + FROM blocks b + LEFT JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index AND uvdv.dashboard_id = $1 + WHERE b.graffiti_text = $2 AND uvdv.validator_index IS NULL + ORDER BY b.proposer + LIMIT $3; + `, dashboardId, graffiti, limit) + }) - // update the group for all existing validators - validatorIndices := make([]uint64, 0, int(limit)) - validatorIndices = append(validatorIndices, validatorIndicesToUpdate...) - - // insert the new validators up to the allowed user max limit taking into account how many validators are already in the dashboard - if len(validatorIndicesToInsert) > 0 { - freeSpace := int(limit) - len(existingValidators) - if freeSpace > 0 { - if len(validatorIndicesToInsert) > freeSpace { // cap inserts to the amount of free space available - log.Infof("limiting the number of validators to insert to %d", freeSpace) - validatorIndicesToInsert = validatorIndicesToInsert[:freeSpace] - } - validatorIndices = append(validatorIndices, validatorIndicesToInsert...) 
- } + err := g.Wait() + if err != nil { + return nil, err } - if len(validatorIndices) == 0 { - // no validators to add - return []t.VDBPostValidatorsData{}, nil - } - log.Infof("inserting %d new validators and updating %d validators of dashboard %d, limit is %d", len(validatorIndicesToInsert), len(validatorIndicesToUpdate), dashboardId, limit) + validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert) + return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices) } func (d *DataAccessService) RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) error { if len(validators) == 0 { - // // Remove all validators for the dashboard - // _, err := d.alloyWriter.ExecContext(ctx, ` - // DELETE FROM users_val_dashboards_validators - // WHERE dashboard_id = $1 - // `, dashboardId) - return fmt.Errorf("calling RemoveValidatorDashboardValidators with empty validators list is not allowed") + // Remove all validators for the dashboard + // This is usually forbidden by API validation + _, err := d.alloyWriter.ExecContext(ctx, ` + DELETE FROM users_val_dashboards_validators + WHERE dashboard_id = $1 + `, dashboardId) + return err } //Create the query to delete validators diff --git a/backend/pkg/api/data_access/vdb_rewards.go b/backend/pkg/api/data_access/vdb_rewards.go index 2212bee81..06d51bbc2 100644 --- a/backend/pkg/api/data_access/vdb_rewards.go +++ b/backend/pkg/api/data_access/vdb_rewards.go @@ -106,12 +106,13 @@ func (d *DataAccessService) GetValidatorDashboardRewards(ctx context.Context, da LeftJoin(goqu.L("blocks b"), goqu.On(goqu.L("v.validator_index = b.proposer AND b.status = '1'"))). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). 
- GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ) @@ -561,12 +562,13 @@ func (d *DataAccessService) GetValidatorDashboardGroupRewards(ctx context.Contex LeftJoin(goqu.L("blocks b"), goqu.On(goqu.L("v.validator_index = b.proposer AND b.status = '1'"))). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch = ?", epoch)) @@ -736,12 +738,13 @@ func (d *DataAccessService) GetValidatorDashboardRewardsChart(ctx context.Contex LeftJoin(goqu.L("blocks b"), goqu.On(goqu.L("v.validator_index = b.proposer AND b.status = '1'"))). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch >= ?", startEpoch)) @@ -987,12 +990,13 @@ func (d *DataAccessService) GetValidatorDashboardDuties(ctx context.Context, das From(goqu.L("blocks b")). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). 
Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch = ?", epoch)). diff --git a/backend/pkg/api/data_access/vdb_summary.go b/backend/pkg/api/data_access/vdb_summary.go index 310e9e2b4..58e29ad2a 100644 --- a/backend/pkg/api/data_access/vdb_summary.go +++ b/backend/pkg/api/data_access/vdb_summary.go @@ -191,12 +191,13 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da From(goqu.L("blocks b")). LeftJoin(goqu.L("execution_payloads ep"), goqu.On(goqu.L("ep.block_hash = b.exec_block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). Where(goqu.L("b.epoch >= ? AND b.epoch <= ? AND b.status = '1'", epochMin, epochMax)). @@ -829,12 +830,13 @@ func (d *DataAccessService) internal_getElClAPR(ctx context.Context, dashboardId From(goqu.L("blocks AS b")). LeftJoin(goqu.L("execution_payloads AS ep"), goqu.On(goqu.L("b.exec_block_hash = ep.block_hash"))). LeftJoin( - goqu.Dialect("postgres"). + goqu.Lateral(goqu.Dialect("postgres"). From("relays_blocks"). Select( goqu.L("exec_block_hash"), goqu.MAX("value").As("value")). - GroupBy("exec_block_hash").As("rb"), + Where(goqu.L("relays_blocks.exec_block_hash = b.exec_block_hash")). + GroupBy("exec_block_hash")).As("rb"), goqu.On(goqu.L("rb.exec_block_hash = b.exec_block_hash")), ). 
Where(goqu.L("b.status = '1'")) diff --git a/backend/pkg/api/enums/notifications_enums.go b/backend/pkg/api/enums/notifications_enums.go index 4367c44ee..554ea2afe 100644 --- a/backend/pkg/api/enums/notifications_enums.go +++ b/backend/pkg/api/enums/notifications_enums.go @@ -10,7 +10,7 @@ var _ EnumFactory[NotificationDashboardsColumn] = NotificationDashboardsColumn(0 const ( NotificationDashboardChainId NotificationDashboardsColumn = iota NotificationDashboardTimestamp - NotificationDashboardDashboardId // sort by name + NotificationDashboardDashboardName // sort by name ) func (c NotificationDashboardsColumn) Int() int { @@ -23,8 +23,8 @@ func (NotificationDashboardsColumn) NewFromString(s string) NotificationDashboar return NotificationDashboardChainId case "timestamp": return NotificationDashboardTimestamp - case "dashboard_id": - return NotificationDashboardDashboardId + case "dashboard_name", "dashboard_id": // accepting id for frontend + return NotificationDashboardDashboardName default: return NotificationDashboardsColumn(-1) } @@ -37,7 +37,7 @@ var NotificationsDashboardsColumns = struct { }{ NotificationDashboardChainId, NotificationDashboardTimestamp, - NotificationDashboardDashboardId, + NotificationDashboardDashboardName, } // ------------------------------------------------------------ @@ -203,7 +203,7 @@ type NotificationSettingsDashboardColumn int var _ EnumFactory[NotificationSettingsDashboardColumn] = NotificationSettingsDashboardColumn(0) const ( - NotificationSettingsDashboardDashboardId NotificationSettingsDashboardColumn = iota + NotificationSettingsDashboardDashboardName NotificationSettingsDashboardColumn = iota NotificationSettingsDashboardGroupName ) @@ -213,8 +213,8 @@ func (c NotificationSettingsDashboardColumn) Int() int { func (NotificationSettingsDashboardColumn) NewFromString(s string) NotificationSettingsDashboardColumn { switch s { - case "dashboard_id": - return NotificationSettingsDashboardDashboardId + case "dashboard_name", 
"dashboard_id": + return NotificationSettingsDashboardDashboardName case "group_name": return NotificationSettingsDashboardGroupName default: @@ -226,6 +226,6 @@ var NotificationSettingsDashboardColumns = struct { DashboardId NotificationSettingsDashboardColumn GroupName NotificationSettingsDashboardColumn }{ - NotificationSettingsDashboardDashboardId, + NotificationSettingsDashboardDashboardName, NotificationSettingsDashboardGroupName, } diff --git a/backend/pkg/api/handlers/auth.go b/backend/pkg/api/handlers/auth.go index 62e55e23c..ee30cedda 100644 --- a/backend/pkg/api/handlers/auth.go +++ b/backend/pkg/api/handlers/auth.go @@ -1,6 +1,7 @@ package handlers import ( + "cmp" "context" "errors" "fmt" @@ -14,6 +15,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/mail" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" commonTypes "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/userservice" @@ -180,13 +182,15 @@ const authHeaderPrefix = "Bearer " func (h *HandlerService) GetUserIdByApiKey(r *http.Request) (uint64, error) { // TODO: store user id in context during ratelimting and use it here - var apiKey string - authHeader := r.Header.Get("Authorization") - if strings.HasPrefix(authHeader, authHeaderPrefix) { - apiKey = strings.TrimPrefix(authHeader, authHeaderPrefix) - } else { - apiKey = r.URL.Query().Get("api_key") - } + query := r.URL.Query() + header := r.Header + apiKey := cmp.Or( + strings.TrimPrefix(header.Get("Authorization"), authHeaderPrefix), + header.Get("X-Api-Key"), + query.Get("api_key"), + query.Get("apiKey"), + query.Get("apikey"), + ) if apiKey == "" { return 0, newUnauthorizedErr("missing api key") } @@ -708,6 +712,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * validationResult, err := 
userservice.VerifyReceipt(nil, nil, verifyPackage) if err != nil { log.Warn(err, "could not verify receipt %v", 0, map[string]interface{}{"receipt": verifyPackage.Receipt}) + metrics.Errors.WithLabelValues(fmt.Sprintf("appsub_verify_%s_failed", req.Transaction.Type)).Inc() if errors.Is(err, userservice.ErrClientInit) { log.Error(err, "Apple or Google client is NOT initialized. Did you provide their configuration?", 0, nil) handleErr(w, r, err) diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index ca7c41bb1..6b0d9e3c5 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -19,6 +19,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gorilla/mux" "github.com/invopop/jsonschema" + "github.com/shopspring/decimal" "github.com/xeipuuv/gojsonschema" "github.com/alexedwards/scs/v2" @@ -29,11 +30,12 @@ import ( ) type HandlerService struct { - dai dataaccess.DataAccessor - scs *scs.SessionManager + dai dataaccess.DataAccessor + scs *scs.SessionManager + isPostMachineMetricsEnabled bool // if more config options are needed, consider having the whole config in here } -func NewHandlerService(dataAccessor dataaccess.DataAccessor, sessionManager *scs.SessionManager) *HandlerService { +func NewHandlerService(dataAccessor dataaccess.DataAccessor, sessionManager *scs.SessionManager, enablePostMachineMetrics bool) *HandlerService { if allNetworks == nil { networks, err := dataAccessor.GetAllNetworks() if err != nil { @@ -43,8 +45,9 @@ func NewHandlerService(dataAccessor dataaccess.DataAccessor, sessionManager *scs } return &HandlerService{ - dai: dataAccessor, - scs: sessionManager, + dai: dataAccessor, + scs: sessionManager, + isPostMachineMetricsEnabled: enablePostMachineMetrics, } } @@ -250,6 +253,35 @@ func (v *validationError) checkUint(param, paramName string) uint64 { return num } +func (v *validationError) checkWeiDecimal(param, paramName string) decimal.Decimal { + dec := 
decimal.Zero + // check if only numbers are contained in the string with regex + if !reInteger.MatchString(param) { + v.add(paramName, fmt.Sprintf("given value '%s' is not a wei string (must be positive integer)", param)) + return dec + } + dec, err := decimal.NewFromString(param) + if err != nil { + v.add(paramName, fmt.Sprintf("given value '%s' is not a wei string (must be positive integer)", param)) + return dec + } + return dec +} + +func (v *validationError) checkWeiMinMax(param, paramName string, min, max decimal.Decimal) decimal.Decimal { + dec := v.checkWeiDecimal(param, paramName) + if v.hasErrors() { + return dec + } + if dec.LessThan(min) { + v.add(paramName, fmt.Sprintf("given value '%s' is too small, minimum value is %s", dec, min)) + } + if dec.GreaterThan(max) { + v.add(paramName, fmt.Sprintf("given value '%s' is too large, maximum value is %s", dec, max)) + } + return dec +} + func (v *validationError) checkBool(param, paramName string) bool { if param == "" { return false @@ -525,14 +557,10 @@ func checkEnum[T enums.EnumFactory[T]](v *validationError, enumString string, na return enum } -// checkEnumIsAllowed checks if the given enum is in the list of allowed enums. 
-func checkEnumIsAllowed[T enums.EnumFactory[T]](v *validationError, enum T, allowed []T, name string) { - if enums.IsInvalidEnum(enum) { - v.add(name, "parameter is missing or invalid, please check the API documentation") - return - } +// checkValueInAllowed checks if the given value is in the list of allowed values. +func checkValueInAllowed[T cmp.Ordered](v *validationError, value T, allowed []T, name string) { for _, a := range allowed { - if enum.Int() == a.Int() { + if cmp.Compare(value, a) == 0 { return } } @@ -658,6 +686,14 @@ func (v *validationError) checkNetworkParameter(param string) uint64 { return v.checkNetwork(intOrString{strValue: &param}) } +func (v *validationError) checkNetworksParameter(param string) []uint64 { + var chainIds []uint64 + for _, network := range splitParameters(param, ',') { + chainIds = append(chainIds, v.checkNetworkParameter(network)) + } + return chainIds +} + // isValidNetwork checks if the given network is a valid network. // It returns the chain id of the network and true if it is valid, otherwise 0 and false. 
func isValidNetwork(network intOrString) (uint64, bool) { diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 78d998856..bf3e347fe 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -609,6 +609,10 @@ func (h *HandlerService) InternalDeleteUserNotificationSettingsPairedDevices(w h h.PublicDeleteUserNotificationSettingsPairedDevices(w, r) } +func (h *HandlerService) InternalPutUserNotificationSettingsClient(w http.ResponseWriter, r *http.Request) { + h.PublicPutUserNotificationSettingsClient(w, r) +} + func (h *HandlerService) InternalGetUserNotificationSettingsDashboards(w http.ResponseWriter, r *http.Request) { h.PublicGetUserNotificationSettingsDashboards(w, r) } diff --git a/backend/pkg/api/handlers/machine_metrics.go b/backend/pkg/api/handlers/machine_metrics.go new file mode 100644 index 000000000..5a84ce435 --- /dev/null +++ b/backend/pkg/api/handlers/machine_metrics.go @@ -0,0 +1,37 @@ +package handlers + +import ( + "net/http" + + "github.com/gobitfly/beaconchain/pkg/api/types" +) + +func (h *HandlerService) InternalGetUserMachineMetrics(w http.ResponseWriter, r *http.Request) { + h.PublicGetUserMachineMetrics(w, r) +} + +func (h *HandlerService) PublicGetUserMachineMetrics(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + offset := v.checkUint(q.Get("offset"), "offset") + limit := uint64(180) + if limitParam := q.Get("limit"); limitParam != "" { + limit = v.checkUint(limitParam, "limit") + } + + data, err := h.dai.GetUserMachineMetrics(r.Context(), userId, limit, offset) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetUserMachineMetricsRespone{ + Data: *data, + } + + returnOk(w, r, response) +} diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index e0e560b8c..c579ccbd6 100644 
--- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -6,12 +6,12 @@ import ( "fmt" "math" "net/http" - "reflect" "time" "github.com/gobitfly/beaconchain/pkg/api/enums" "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gorilla/mux" + "github.com/shopspring/decimal" ) // All handler function names must include the HTTP method and the path they handle @@ -25,7 +25,6 @@ import ( // @description - Setting the URL query parameter in the following format: `api_key={your_api_key}`.\ // @description Example: `https://beaconcha.in/api/v2/example?field=value&api_key={your_api_key}` -// @host beaconcha.in // @BasePath /api/v2 // @securitydefinitions.apikey ApiKeyInHeader @@ -271,7 +270,7 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h // @Security ApiKeyInHeader || ApiKeyInQuery // @Tags Validator Dashboard Management // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Success 204 "Dashboard deleted successfully." // @Failure 400 {object} types.ApiErrorResponse "Bad Request" // @Router /validator-dashboards/{dashboard_id} [delete] @@ -297,7 +296,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicPutValidatorDashboardName.request true "request" // @Success 200 {object} types.ApiDataResponse[types.VDBPostReturnData] // @Failure 400 {object} types.ApiErrorResponse @@ -336,7 +335,7 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." 
+// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicPostValidatorDashboardGroups.request true "request" // @Success 201 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] // @Failure 400 {object} types.ApiErrorResponse @@ -400,8 +399,8 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." // @Param request body handlers.PublicPutValidatorDashboardGroups.request true "request" // @Success 200 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] // @Failure 400 {object} types.ApiErrorResponse @@ -453,8 +452,8 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter // @Security ApiKeyInHeader || ApiKeyInQuery // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." // @Success 204 "Group deleted successfully." // @Failure 400 {object} types.ApiErrorResponse // @Router /validator-dashboards/{dashboard_id}/groups/{group_id} [delete] @@ -491,16 +490,15 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit // PublicGetValidatorDashboardGroups godoc // -// @Description Add new validators to a specified dashboard or update the group of already-added validators. +// @Description Add new validators to a specified dashboard or update the group of already-added validators. 
This endpoint will always add as many validators as possible, even if more validators are provided than allowed by the subscription plan. The response will contain a list of added validators. // @Security ApiKeyInHeader || ApiKeyInQuery // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param request body handlers.PublicPostValidatorDashboardValidators.request true "`group_id`: (optional) Provide a single group id, to which all validators get added to. If omitted, the default group will be used.

To add validators, only one of the following fields can be set:" +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardValidators.request true "`group_id`: (optional) Provide a single group id, to which all validators get added to. If omitted, the default group will be used.

To add validators or update their group, only one of the following fields can be set:" // @Success 201 {object} types.ApiDataResponse[[]types.VDBPostValidatorsData] "Returns a list of added validators." // @Failure 400 {object} types.ApiErrorResponse -// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their validator limit." // @Router /validator-dashboards/{dashboard_id}/validators [post] func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError @@ -512,7 +510,9 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW WithdrawalAddress string `json:"withdrawal_address,omitempty"` Graffiti string `json:"graffiti,omitempty"` } - var req request + req := request{ + GroupId: types.DefaultGroupId, // default value + } if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return @@ -521,11 +521,17 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } + groupId := req.GroupId // check if exactly one of validators, deposit_address, withdrawal_address, graffiti is set - fields := []interface{}{req.Validators, req.DepositAddress, req.WithdrawalAddress, req.Graffiti} + nilFields := []bool{ + req.Validators == nil, + req.DepositAddress == "", + req.WithdrawalAddress == "", + req.Graffiti == "", + } var count int - for _, set := range fields { - if !reflect.ValueOf(set).IsZero() { + for _, isNil := range nilFields { + if !isNil { count++ } } @@ -537,7 +543,6 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW return } - groupId := req.GroupId ctx := r.Context() groupExists, err := h.dai.GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) if err != nil { @@ -558,11 +563,23 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, 
r, err) return } - limit := userInfo.PremiumPerks.ValidatorsPerDashboard if req.Validators == nil && !userInfo.PremiumPerks.BulkAdding && !isUserAdmin(userInfo) { - returnConflict(w, r, errors.New("bulk adding not allowed with current subscription plan")) + returnForbidden(w, r, errors.New("bulk adding not allowed with current subscription plan")) return } + dashboardLimit := userInfo.PremiumPerks.ValidatorsPerDashboard + existingValidatorCount, err := h.dai.GetValidatorDashboardValidatorsCount(ctx, dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + var limit uint64 + if isUserAdmin(userInfo) { + limit = math.MaxUint32 // no limit for admins + } else if dashboardLimit >= existingValidatorCount { + limit = dashboardLimit - existingValidatorCount + } + var data []types.VDBPostValidatorsData var dataErr error switch { @@ -577,15 +594,8 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, err) return } - // check if adding more validators than allowed - existingValidatorCount, err := h.dai.GetValidatorDashboardExistingValidatorCount(ctx, dashboardId, validators) - if err != nil { - handleErr(w, r, err) - return - } - if uint64(len(validators)) > existingValidatorCount+limit { - returnConflict(w, r, fmt.Errorf("adding more validators than allowed, limit is %v new validators", limit)) - return + if len(validators) > int(limit) { + validators = validators[:limit] } data, dataErr = h.dai.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) @@ -631,7 +641,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id query string false "The ID of the group." +// @Param group_id query integer false "The ID of the group." // @Param limit query string false "The maximum number of results that may be returned." 
// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(index, public_key, balance, status, withdrawal_credentials) // @Param search query string false "Search for Address, ENS." @@ -672,7 +682,7 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicDeleteValidatorDashboardValidators.request true "`validators`: Provide an array of validator indices or public keys that should get removed from the dashboard." // @Success 204 "Validators removed successfully." // @Failure 400 {object} types.ApiErrorResponse @@ -688,7 +698,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons handleErr(w, r, err) return } - indices, publicKeys := v.checkValidators(req.Validators, false) + indices, publicKeys := v.checkValidators(req.Validators, forbidEmpty) if v.hasErrors() { handleErr(w, r, v) return @@ -714,7 +724,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicPostValidatorDashboardPublicIds.request true "`name`: Provide a public name for the dashboard
`share_settings`:" // @Success 201 {object} types.ApiDataResponse[types.VDBPublicId] // @Failure 400 {object} types.ApiErrorResponse @@ -768,7 +778,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param public_id path string true "The ID of the public ID." // @Param request body handlers.PublicPutValidatorDashboardPublicId.request true "`name`: Provide a public name for the dashboard
`share_settings`:" // @Success 200 {object} types.ApiDataResponse[types.VDBPublicId] @@ -823,7 +833,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit // @Security ApiKeyInHeader || ApiKeyInQuery // @Tags Validator Dashboard Management // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param public_id path string true "The ID of the public ID." // @Success 204 "Public ID deleted successfully." // @Failure 400 {object} types.ApiErrorResponse @@ -863,8 +873,8 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "request" +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "`is_archived`: Set to `true` to archive the dashboard, or `false` to unarchive it." // @Success 200 {object} types.ApiDataResponse[types.VDBPostArchivingReturnData] // @Failure 400 {object} types.ApiErrorResponse // @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." 
@@ -1019,7 +1029,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - checkEnumIsAllowed(&v, period, summaryAllowedPeriods, "period") + checkValueInAllowed(&v, period, summaryAllowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -1043,7 +1053,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param group_id path integer true "The ID of the group." // @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) // @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." // @Success 200 {object} types.GetValidatorDashboardGroupSummaryResponse @@ -1066,7 +1076,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response groupId := v.checkGroupId(vars["group_id"], forbidEmpty) period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - checkEnumIsAllowed(&v, period, summaryAllowedPeriods, "period") + checkValueInAllowed(&v, period, summaryAllowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -1142,7 +1152,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id query string false "The ID of the group." +// @Param group_id query integer false "The ID of the group." 
// @Param duty query string false "Validator duty to get data for." Enums(none, sync, slashed, proposal) Default(none) // @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) // @Success 200 {object} types.GetValidatorDashboardSummaryValidatorsResponse @@ -1161,7 +1171,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - checkEnumIsAllowed(&v, period, allowedPeriods, "period") + checkValueInAllowed(&v, period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -1246,8 +1256,8 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param epoch path string true "The epoch to get data for." +// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch to get data for." // @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." // @Success 200 {object} types.GetValidatorDashboardGroupRewardsResponse // @Failure 400 {object} types.ApiErrorResponse @@ -1322,8 +1332,8 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param epoch path string true "The epoch to get data for." -// @Param group_id query string false "The ID of the group." 
+// @Param epoch path integer true "The epoch to get data for." +// @Param group_id query integer false "The ID of the group." // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." // @Param limit query string false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(validator, reward) @@ -1460,8 +1470,8 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param timestamp path string true "The timestamp to get data for." +// @Param group_id path integer true "The ID of the group." +// @Param timestamp path integer true "The timestamp to get data for." // @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." // @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) // @Success 200 {object} types.GetValidatorDashboardGroupHeatmapResponse @@ -1901,9 +1911,9 @@ func (h *HandlerService) PublicGetUserNotifications(w http.ResponseWriter, r *ht // @Security ApiKeyInHeader || ApiKeyInQuery // @Tags Notifications // @Produce json -// @Param network query string false "If set, results will be filtered to only include networks given. Provide a comma separated list." +// @Param networks query string false "If set, results will be filtered to only include networks given. Provide a comma separated list." 
// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." " Enums(chain_id, timestamp, dashboard_id) // @Param search query string false "Search for Dashboard, Group" // @Success 200 {object} types.InternalGetUserNotificationDashboardsResponse @@ -1919,19 +1929,19 @@ func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWrit q := r.URL.Query() pagingParams := v.checkPagingParams(q) sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort")) - chainId := v.checkNetworkParameter(q.Get("network")) + chainIds := v.checkNetworksParameter(q.Get("networks")) if v.hasErrors() { handleErr(w, r, v) return } - data, paging, err := h.dai.GetDashboardNotifications(r.Context(), userId, chainId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.dai.GetDashboardNotifications(r.Context(), userId, chainIds, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return } response := types.InternalGetUserNotificationDashboardsResponse{ Data: data, - Paging: *paging, + Paging: *paging, } returnOk(w, r, response) } @@ -1943,8 +1953,8 @@ func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWrit // @Tags Notifications // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param epoch path string true "The epoch of the notification."
+// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch of the notification." // @Success 200 {object} types.InternalGetUserNotificationsValidatorDashboardResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/validator-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] @@ -1976,8 +1986,8 @@ func (h *HandlerService) PublicGetUserNotificationsValidatorDashboard(w http.Res // @Tags Notifications // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param epoch path string true "The epoch of the notification." +// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch of the notification." // @Success 200 {object} types.InternalGetUserNotificationsAccountDashboardResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/account-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] @@ -2009,7 +2019,7 @@ func (h *HandlerService) PublicGetUserNotificationsAccountDashboard(w http.Respo // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(machine_name, threshold, event_type, timestamp) // @Param search query string false "Search for Machine" // @Success 200 {object} types.InternalGetUserNotificationMachinesResponse @@ -2048,7 +2058,7 @@ func (h *HandlerService) PublicGetUserNotificationMachines(w http.ResponseWriter // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(client_name, timestamp) // @Param search query string false "Search for Client" // @Success 200 {object} types.InternalGetUserNotificationClientsResponse @@ -2087,9 +2097,9 @@ func (h *HandlerService) PublicGetUserNotificationClients(w http.ResponseWriter, // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(timestamp, event_type, node_address) -// @Param search query string false "Search for TODO" +// @Param search query string false "Search for Node Address" // @Success 200 {object} types.InternalGetUserNotificationRocketPoolResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/rocket-pool [get] @@ -2126,9 +2136,8 @@ func (h *HandlerService) PublicGetUserNotificationRocketPool(w http.ResponseWrit // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(timestamp, event_type) -// @Param search query string false "Search for TODO" // @Success 200 {object} types.InternalGetUserNotificationNetworksResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/networks [get] @@ -2146,7 +2155,7 @@ func (h *HandlerService) PublicGetUserNotificationNetworks(w http.ResponseWriter handleErr(w, r, v) return } - data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2190,7 +2199,7 @@ func (h *HandlerService) PublicGetUserNotificationSettings(w http.ResponseWriter // @Tags Notification Settings // @Accept json // @Produce json -// @Param request body types.NotificationSettingsGeneral true "Notification settings" +// @Param request body types.NotificationSettingsGeneral true "Description TODO" // @Success 200 {object} types.InternalPutUserNotificationSettingsGeneralResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/settings/general [put] @@ -2209,9 +2218,6 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons checkMinMax(&v, req.MachineStorageUsageThreshold, 0, 1, "machine_storage_usage_threshold") checkMinMax(&v, req.MachineCpuUsageThreshold, 0, 1, "machine_cpu_usage_threshold") checkMinMax(&v, req.MachineMemoryUsageThreshold, 0, 1, "machine_memory_usage_threshold") - checkMinMax(&v, req.RocketPoolMaxCollateralThreshold, 0, 1, "rocket_pool_max_collateral_threshold") - checkMinMax(&v, req.RocketPoolMinCollateralThreshold, 0, 1, "rocket_pool_min_collateral_threshold") - // TODO: check validity of clients if v.hasErrors() { handleErr(w, r, v) return @@ -2235,7 +2241,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons // @Accept json // 
@Produce json // @Param network path string true "The networks name or chain ID." -// @Param request body types.NotificationSettingsNetwork true "Notification settings" +// @Param request body handlers.PublicPutUserNotificationSettingsNetworks.request true "Description Todo" // @Success 200 {object} types.InternalPutUserNotificationSettingsNetworksResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/settings/networks/{network} [put] @@ -2246,19 +2252,42 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon handleErr(w, r, err) return } - var req types.NotificationSettingsNetwork + type request struct { + IsGasAboveSubscribed bool `json:"is_gas_above_subscribed"` + GasAboveThreshold string `json:"gas_above_threshold"` + IsGasBelowSubscribed bool `json:"is_gas_below_subscribed"` + GasBelowThreshold string `json:"gas_below_threshold" ` + IsParticipationRateSubscribed bool `json:"is_participation_rate_subscribed"` + ParticipationRateThreshold float64 `json:"participation_rate_threshold"` + IsNewRewardRoundSubscribed bool `json:"is_new_reward_round_subscribed"` + } + var req request if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return } checkMinMax(&v, req.ParticipationRateThreshold, 0, 1, "participation_rate_threshold") - chainId := v.checkNetworkParameter(mux.Vars(r)["network"]) + + minWei := decimal.New(1000000, 1) // 0.001 Gwei + maxWei := decimal.New(1000000000000, 1) // 1000 Gwei + gasAboveThreshold := v.checkWeiMinMax(req.GasAboveThreshold, "gas_above_threshold", minWei, maxWei) + gasBelowThreshold := v.checkWeiMinMax(req.GasBelowThreshold, "gas_below_threshold", minWei, maxWei) if v.hasErrors() { handleErr(w, r, v) return } - err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, req) + settings := types.NotificationSettingsNetwork{ + IsGasAboveSubscribed: req.IsGasAboveSubscribed, + GasAboveThreshold: gasAboveThreshold, + IsGasBelowSubscribed: 
req.IsGasBelowSubscribed, + GasBelowThreshold: gasBelowThreshold, + IsParticipationRateSubscribed: req.IsParticipationRateSubscribed, + ParticipationRateThreshold: req.ParticipationRateThreshold, + IsNewRewardRoundSubscribed: req.IsNewRewardRoundSubscribed, + } + + err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, settings) if err != nil { handleErr(w, r, err) return @@ -2266,7 +2295,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon response := types.InternalPutUserNotificationSettingsNetworksResponse{ Data: types.NotificationNetwork{ ChainId: chainId, - Settings: req, + Settings: settings, }, } returnOk(w, r, response) @@ -2280,7 +2309,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon // @Accept json // @Produce json // @Param paired_device_id path string true "The paired device ID." -// @Param request body handlers.PublicPutUserNotificationSettingsPairedDevices.request true "Notification settings" +// @Param request body handlers.PublicPutUserNotificationSettingsPairedDevices.request true "Description TODO" // @Success 200 {object} types.InternalPutUserNotificationSettingsPairedDevicesResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [put] @@ -2355,6 +2384,49 @@ func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w htt returnNoContent(w, r) } +// PublicPutUserNotificationSettingsClient godoc +// +// @Description Update client notification settings for the authenticated user. When a client is subscribed, notifications will be sent when a new version is available. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param client_id path integer true "The ID of the client." 
+// @Param request body handlers.PublicPutUserNotificationSettingsClient.request true "`is_subscribed`: Set to `true` to subscribe to notifications; set to `false` to unsubscribe." +// @Success 200 {object} types.InternalPutUserNotificationSettingsClientResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/clients/{client_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsClient(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + type request struct { + IsSubscribed bool `json:"is_subscribed"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + clientId := v.checkUint(mux.Vars(r)["client_id"], "client_id") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.dai.UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsClientResponse{ + Data: *data, + } + returnOk(w, r, response) +} + // PublicGetUserNotificationSettingsDashboards godoc // // @Description Get a list of notification settings for the dashboards of the authenticated user. @@ -2362,7 +2434,7 @@ func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w htt // @Tags Notification Settings // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. 
Append with `:desc` for descending order." Enums (dashboard_id, group_name) // @Param search query string false "Search for Dashboard, Group" // @Success 200 {object} types.InternalGetUserNotificationSettingsDashboardsResponse @@ -2402,7 +2474,7 @@ func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.Resp // @Accept json // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param group_id path integer true "The ID of the group." // @Param request body types.NotificationSettingsValidatorDashboard true "Notification settings" // @Success 200 {object} types.InternalPutUserNotificationSettingsValidatorDashboardResponse // @Failure 400 {object} types.ApiErrorResponse @@ -2418,6 +2490,9 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h vars := mux.Vars(r) dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) groupId := v.checkExistingGroupId(vars["group_id"]) + + checkMinMax(&v, req.MaxCollateralThreshold, 0, 1, "max_collateral_threshold") + checkMinMax(&v, req.MinCollateralThreshold, 0, 1, "min_collateral_threshold") if v.hasErrors() { handleErr(w, r, v) return @@ -2441,7 +2516,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h // @Accept json // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param group_id path integer true "The ID of the group." 
// @Param request body handlers.PublicPutUserNotificationSettingsAccountDashboard.request true "Notification settings" // @Success 200 {object} types.InternalPutUserNotificationSettingsAccountDashboardResponse // @Failure 400 {object} types.ApiErrorResponse @@ -2467,14 +2542,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsAccountDashboard(w htt handleErr(w, r, err) return } - chainIdMap := v.checkNetworkSlice(req.SubscribedChainIds) - // convert to uint64[] slice - chainIds := make([]uint64, len(chainIdMap)) - i := 0 - for k := range chainIdMap { - chainIds[i] = k - i++ - } + chainIds := v.checkNetworkSlice(req.SubscribedChainIds) checkMinMax(&v, req.ERC20TokenTransfersValueThreshold, 0, math.MaxFloat64, "group_offline_threshold") vars := mux.Vars(r) dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) diff --git a/backend/pkg/api/handlers/search_handlers.go b/backend/pkg/api/handlers/search_handlers.go index 6793e4ef0..fa332188a 100644 --- a/backend/pkg/api/handlers/search_handlers.go +++ b/backend/pkg/api/handlers/search_handlers.go @@ -5,8 +5,10 @@ import ( "encoding/hex" "errors" "fmt" + "maps" "net/http" "regexp" + "slices" "strconv" "strings" @@ -68,12 +70,12 @@ func (h *HandlerService) InternalPostSearch(w http.ResponseWriter, r *http.Reque searchResultChan := make(chan types.SearchResult) // iterate over all combinations of search types and networks - for searchType := range searchTypeSet { + for _, searchType := range searchTypeSet { // check if input matches the regex for the search type if !searchTypeToRegex[searchType].MatchString(req.Input) { continue } - for chainId := range chainIdSet { + for _, chainId := range chainIdSet { chainId := chainId searchType := searchType g.Go(func() error { @@ -326,14 +328,14 @@ func (h *HandlerService) handleSearchValidatorsByGraffiti(ctx context.Context, i // Input Validation // if the passed slice is empty, return a set with all chain IDs; otherwise check if the passed networks are valid 
-func (v *validationError) checkNetworkSlice(networks []intOrString) map[uint64]struct{} { +func (v *validationError) checkNetworkSlice(networks []intOrString) []uint64 { networkSet := map[uint64]struct{}{} // if the list is empty, query all networks if len(networks) == 0 { for _, n := range allNetworks { networkSet[n.ChainId] = struct{}{} } - return networkSet + return slices.Collect(maps.Keys(networkSet)) } // list not empty, check if networks are valid for _, network := range networks { @@ -344,18 +346,18 @@ func (v *validationError) checkNetworkSlice(networks []intOrString) map[uint64]s } networkSet[chainId] = struct{}{} } - return networkSet + return slices.Collect(maps.Keys(networkSet)) } // if the passed slice is empty, return a set with all search types; otherwise check if the passed types are valid -func (v *validationError) checkSearchTypes(types []searchTypeKey) map[searchTypeKey]struct{} { +func (v *validationError) checkSearchTypes(types []searchTypeKey) []searchTypeKey { typeSet := map[searchTypeKey]struct{}{} // if the list is empty, query all types if len(types) == 0 { for t := range searchTypeToRegex { typeSet[t] = struct{}{} } - return typeSet + return slices.Collect(maps.Keys(typeSet)) } // list not empty, check if types are valid for _, t := range types { @@ -365,5 +367,5 @@ func (v *validationError) checkSearchTypes(types []searchTypeKey) map[searchType } typeSet[t] = struct{}{} } - return typeSet + return slices.Collect(maps.Keys(typeSet)) } diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 16ab4c0b2..e15ad87cb 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -32,7 +32,7 @@ func NewApiRouter(dataAccessor dataaccess.DataAccessor, cfg *types.Config) *mux. 
if !(cfg.Frontend.CsrfInsecure || cfg.Frontend.Debug) { internalRouter.Use(getCsrfProtectionMiddleware(cfg), csrfInjecterMiddleware) } - handlerService := handlers.NewHandlerService(dataAccessor, sessionManager) + handlerService := handlers.NewHandlerService(dataAccessor, sessionManager, !cfg.Frontend.DisableStatsInserts) // store user id in context, if available publicRouter.Use(handlers.GetUserIdStoreMiddleware(handlerService.GetUserIdByApiKey)) @@ -123,6 +123,8 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro {http.MethodGet, "/users/me/dashboards", hs.PublicGetUserDashboards, hs.InternalGetUserDashboards}, {http.MethodPut, "/users/me/notifications/settings/paired-devices/{client_id}/token", nil, hs.InternalPostUsersMeNotificationSettingsPairedDevicesToken}, + {http.MethodGet, "/users/me/machine-metrics", hs.PublicGetUserMachineMetrics, hs.InternalGetUserMachineMetrics}, + {http.MethodPost, "/search", nil, hs.InternalPostSearch}, {http.MethodPost, "/account-dashboards", hs.PublicPostAccountDashboards, hs.InternalPostAccountDashboards}, @@ -329,6 +331,7 @@ func addNotificationRoutes(hs *handlers.HandlerService, publicRouter, internalRo {http.MethodPut, "/settings/networks/{network}", hs.PublicPutUserNotificationSettingsNetworks, hs.InternalPutUserNotificationSettingsNetworks}, {http.MethodPut, "/settings/paired-devices/{paired_device_id}", hs.PublicPutUserNotificationSettingsPairedDevices, hs.InternalPutUserNotificationSettingsPairedDevices}, {http.MethodDelete, "/settings/paired-devices/{paired_device_id}", hs.PublicDeleteUserNotificationSettingsPairedDevices, hs.InternalDeleteUserNotificationSettingsPairedDevices}, + {http.MethodPut, "/settings/clients/{client_id}", hs.PublicPutUserNotificationSettingsClient, hs.InternalPutUserNotificationSettingsClient}, {http.MethodGet, "/settings/dashboards", hs.PublicGetUserNotificationSettingsDashboards, hs.InternalGetUserNotificationSettingsDashboards}, {http.MethodPost, "/test-email", 
hs.PublicPostUserNotificationsTestEmail, hs.InternalPostUserNotificationsTestEmail}, {http.MethodPost, "/test-push", hs.PublicPostUserNotificationsTestPush, hs.InternalPostUserNotificationsTestPush}, diff --git a/backend/pkg/api/types/machine_metrics.go b/backend/pkg/api/types/machine_metrics.go new file mode 100644 index 000000000..059c06471 --- /dev/null +++ b/backend/pkg/api/types/machine_metrics.go @@ -0,0 +1,79 @@ +package types + +type MachineMetricSystem struct { + Timestamp uint64 `json:"timestamp,omitempty" faker:"boundary_start=1725166800, boundary_end=1725177600"` + ExporterVersion string `json:"exporter_version,omitempty"` + // system + CpuCores uint64 `json:"cpu_cores,omitempty"` + CpuThreads uint64 `json:"cpu_threads,omitempty"` + CpuNodeSystemSecondsTotal uint64 `json:"cpu_node_system_seconds_total,omitempty"` + CpuNodeUserSecondsTotal uint64 `json:"cpu_node_user_seconds_total,omitempty"` + CpuNodeIowaitSecondsTotal uint64 `json:"cpu_node_iowait_seconds_total,omitempty"` + CpuNodeIdleSecondsTotal uint64 `json:"cpu_node_idle_seconds_total,omitempty"` + MemoryNodeBytesTotal uint64 `json:"memory_node_bytes_total,omitempty"` + MemoryNodeBytesFree uint64 `json:"memory_node_bytes_free,omitempty"` + MemoryNodeBytesCached uint64 `json:"memory_node_bytes_cached,omitempty"` + MemoryNodeBytesBuffers uint64 `json:"memory_node_bytes_buffers,omitempty"` + DiskNodeBytesTotal uint64 `json:"disk_node_bytes_total,omitempty"` + DiskNodeBytesFree uint64 `json:"disk_node_bytes_free,omitempty"` + DiskNodeIoSeconds uint64 `json:"disk_node_io_seconds,omitempty"` + DiskNodeReadsTotal uint64 `json:"disk_node_reads_total,omitempty"` + DiskNodeWritesTotal uint64 `json:"disk_node_writes_total,omitempty"` + NetworkNodeBytesTotalReceive uint64 `json:"network_node_bytes_total_receive,omitempty"` + NetworkNodeBytesTotalTransmit uint64 `json:"network_node_bytes_total_transmit,omitempty"` + MiscNodeBootTsSeconds uint64 `json:"misc_node_boot_ts_seconds,omitempty"` + MiscOs string 
`json:"misc_os,omitempty"` + // do not store in bigtable but include them in generated model + Machine *string `json:"machine,omitempty"` +} + +type MachineMetricValidator struct { + Timestamp uint64 `json:"timestamp,omitempty" faker:"boundary_start=1725166800, boundary_end=1725177600"` + ExporterVersion string `json:"exporter_version,omitempty"` + // process + CpuProcessSecondsTotal uint64 `json:"cpu_process_seconds_total,omitempty"` + MemoryProcessBytes uint64 `json:"memory_process_bytes,omitempty"` + ClientName string `json:"client_name,omitempty"` + ClientVersion string `json:"client_version,omitempty"` + ClientBuild uint64 `json:"client_build,omitempty"` + SyncEth2FallbackConfigured bool `json:"sync_eth2_fallback_configured,omitempty"` + SyncEth2FallbackConnected bool `json:"sync_eth2_fallback_connected,omitempty"` + // validator + ValidatorTotal uint64 `json:"validator_total,omitempty"` + ValidatorActive uint64 `json:"validator_active,omitempty"` + // do not store in bigtable but include them in generated model + Machine *string `json:"machine,omitempty"` +} + +type MachineMetricNode struct { + Timestamp uint64 `json:"timestamp,omitempty" faker:"boundary_start=1725166800, boundary_end=1725177600"` + ExporterVersion string `json:"exporter_version,omitempty"` + // process + CpuProcessSecondsTotal uint64 `json:"cpu_process_seconds_total,omitempty"` + MemoryProcessBytes uint64 `json:"memory_process_bytes,omitempty"` + ClientName string `json:"client_name,omitempty"` + ClientVersion string `json:"client_version,omitempty"` + ClientBuild uint64 `json:"client_build,omitempty"` + SyncEth2FallbackConfigured bool `json:"sync_eth2_fallback_configured,omitempty"` + SyncEth2FallbackConnected bool `json:"sync_eth2_fallback_connected,omitempty"` + // node + DiskBeaconchainBytesTotal uint64 `json:"disk_beaconchain_bytes_total,omitempty"` + NetworkLibp2PBytesTotalReceive uint64 `json:"network_libp2p_bytes_total_receive,omitempty"` + NetworkLibp2PBytesTotalTransmit uint64 
`json:"network_libp2p_bytes_total_transmit,omitempty"` + NetworkPeersConnected uint64 `json:"network_peers_connected,omitempty"` + SyncEth1Connected bool `json:"sync_eth1_connected,omitempty"` + SyncEth2Synced bool `json:"sync_eth2_synced,omitempty"` + SyncBeaconHeadSlot uint64 `json:"sync_beacon_head_slot,omitempty"` + SyncEth1FallbackConfigured bool `json:"sync_eth1_fallback_configured,omitempty"` + SyncEth1FallbackConnected bool `json:"sync_eth1_fallback_connected,omitempty"` + // do not store in bigtable but include them in generated model + Machine *string `json:"machine,omitempty"` +} + +type MachineMetricsData struct { + SystemMetrics []*MachineMetricSystem `json:"system_metrics" faker:"slice_len=30"` + ValidatorMetrics []*MachineMetricValidator `json:"validator_metrics" faker:"slice_len=30"` + NodeMetrics []*MachineMetricNode `json:"node_metrics" faker:"slice_len=30"` +} + +type GetUserMachineMetricsRespone ApiDataResponse[MachineMetricsData] diff --git a/backend/pkg/api/types/mobile.go b/backend/pkg/api/types/mobile.go index 62c323b0d..cb662370b 100644 --- a/backend/pkg/api/types/mobile.go +++ b/backend/pkg/api/types/mobile.go @@ -14,7 +14,7 @@ type MobileWidgetData struct { Last24hIncome decimal.Decimal `json:"last_24h_income" faker:"eth"` Last7dIncome decimal.Decimal `json:"last_7d_income" faker:"eth"` Last30dApr float64 `json:"last_30d_apr"` - Last30dEfficiency decimal.Decimal `json:"last_30d_efficiency" faker:"eth"` + Last30dEfficiency float64 `json:"last_30d_efficiency"` NetworkEfficiency float64 `json:"network_efficiency"` RplPrice decimal.Decimal `json:"rpl_price" faker:"eth"` RplApr float64 `json:"rpl_apr"` diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 1be663dae..292056fcf 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -17,12 +17,11 @@ type NotificationOverviewData struct { Last24hWebhookCount uint64 `json:"last_24h_webhook_count"` // counts 
are shown in their respective tables - VDBSubscriptionsCount uint64 `json:"vdb_subscriptions_count"` - ADBSubscriptionsCount uint64 `json:"adb_subscriptions_count"` - MachinesSubscriptionCount uint64 `json:"machines_subscription_count"` - ClientsSubscriptionCount uint64 `json:"clients_subscription_count"` - RocketPoolSubscriptionCount uint64 `json:"rocket_pool_subscription_count"` - NetworksSubscriptionCount uint64 `json:"networks_subscription_count"` + VDBSubscriptionsCount uint64 `json:"vdb_subscriptions_count"` + ADBSubscriptionsCount uint64 `json:"adb_subscriptions_count"` + MachinesSubscriptionCount uint64 `json:"machines_subscription_count"` + ClientsSubscriptionCount uint64 `json:"clients_subscription_count"` + NetworksSubscriptionCount uint64 `json:"networks_subscription_count"` } type InternalGetUserNotificationsResponse ApiDataResponse[NotificationOverviewData] @@ -37,7 +36,7 @@ type NotificationDashboardsTableRow struct { GroupId uint64 `json:"group_id"` GroupName string `json:"group_name"` EntityCount uint64 `json:"entity_count"` - EventTypes []string `json:"event_types" tstype:"('validator_online' | 'validator_offline' | 'group_online' | 'group_offline' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'sync' | 'withdrawal' | 'got_slashed' | 'has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]" faker:"oneof: validator_offline, group_offline, attestation_missed, proposal_success, proposal_missed, proposal_upcoming, sync, withdrawal, slashed_own, incoming_tx, outgoing_tx, transfer_erc20, transfer_erc721, transfer_erc1155"` + EventTypes []string `json:"event_types" tstype:"('validator_online' | 'validator_offline' | 'group_online' | 'group_offline' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'max_collateral' | 'min_collateral' | 'sync' | 'withdrawal' | 'got_slashed' | 'has_slashed' | 'incoming_tx' | 'outgoing_tx' | 
'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]" faker:"oneof: validator_offline, group_offline, attestation_missed, proposal_success, proposal_missed, proposal_upcoming, max_collateral, min_collateral, sync, withdrawal, slashed_own, incoming_tx, outgoing_tx, transfer_erc20, transfer_erc721, transfer_erc1155"` } type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[NotificationDashboardsTableRow] @@ -74,6 +73,7 @@ type NotificationValidatorDashboardDetail struct { GroupOfflineReminder []NotificationEventGroup `json:"group_offline_reminder"` ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` GroupBackOnline []NotificationEventGroupBackOnline `json:"group_back_online"` + // TODO min and max collateral events } type InternalGetUserNotificationsValidatorDashboardResponse ApiDataResponse[NotificationValidatorDashboardDetail] @@ -99,7 +99,7 @@ type InternalGetUserNotificationsAccountDashboardResponse ApiDataResponse[Notifi // Machines Table type NotificationMachinesTableRow struct { MachineName string `json:"machine_name"` - Threshold float64 `json:"threshold" faker:"boundary_start=0, boundary_end=1"` + Threshold float64 `json:"threshold,omitempty" faker:"boundary_start=0, boundary_end=1"` EventType string `json:"event_type" tstype:"'offline' | 'storage' | 'cpu' | 'memory'" faker:"oneof: offline, storage, cpu, memory"` Timestamp int64 `json:"timestamp"` } @@ -111,6 +111,7 @@ type InternalGetUserNotificationMachinesResponse ApiPagingResponse[NotificationM type NotificationClientsTableRow struct { ClientName string `json:"client_name"` Version string `json:"version"` + Url string `json:"url"` Timestamp int64 `json:"timestamp"` } @@ -119,10 +120,10 @@ type InternalGetUserNotificationClientsResponse ApiPagingResponse[NotificationCl // ------------------------------------------------------------ // Rocket Pool Table type NotificationRocketPoolTableRow struct { - Timestamp int64 `json:"timestamp"` - EventType 
string `json:"event_type" tstype:"'reward_round' | 'collateral_max' | 'collateral_min'" faker:"oneof: reward_round, collateral_max, collateral_min"` - AlertValue float64 `json:"alert_value,omitempty"` // only for some notification types, e.g. max collateral - NodeAddress Hash `json:"node_address"` + Timestamp int64 `json:"timestamp"` + EventType string `json:"event_type" tstype:"'reward_round' | 'collateral_max' | 'collateral_min'" faker:"oneof: reward_round, collateral_max, collateral_min"` + Threshold float64 `json:"threshold,omitempty"` // only for some notification types, e.g. max collateral + Node Address `json:"node"` } type InternalGetUserNotificationRocketPoolResponse ApiPagingResponse[NotificationRocketPoolTableRow] @@ -130,10 +131,10 @@ type InternalGetUserNotificationRocketPoolResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Networks Table type NotificationNetworksTableRow struct { - ChainId uint64 `json:"chain_id"` - Timestamp int64 `json:"timestamp"` - EventType string `json:"event_type" tstype:"'gas_above' | 'gas_below' | 'participation_rate'" faker:"oneof: gas_above, gas_below, participation_rate"` - AlertValue decimal.Decimal `json:"alert_value"` // wei string for gas alerts, otherwise percentage (0-1) for participation rate + ChainId uint64 `json:"chain_id"` + Timestamp int64 `json:"timestamp"` + EventType string `json:"event_type" tstype:"'new_reward_round' | 'gas_above' | 'gas_below' | 'participation_rate'" faker:"oneof: new_reward_round, gas_above, gas_below, participation_rate"` + Threshold decimal.Decimal `json:"threshold,omitempty"` // participation rate threshold should also be passed as decimal string } type InternalGetUserNotificationNetworksResponse ApiPagingResponse[NotificationNetworksTableRow] @@ -147,6 +148,7 @@ type NotificationSettingsNetwork struct { GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"eth"` IsParticipationRateSubscribed bool 
`json:"is_participation_rate_subscribed"` ParticipationRateThreshold float64 `json:"participation_rate_threshold" faker:"boundary_start=0, boundary_end=1"` + IsNewRewardRoundSubscribed bool `json:"is_new_reward_round_subscribed"` } type NotificationNetwork struct { ChainId uint64 `json:"chain_id"` @@ -162,6 +164,15 @@ type NotificationPairedDevice struct { } type InternalPutUserNotificationSettingsPairedDevicesResponse ApiDataResponse[NotificationPairedDevice] +type NotificationSettingsClient struct { + Id uint64 `json:"id"` + Name string `json:"name"` + Category string `json:"category" tstype:"'execution_layer' | 'consensus_layer' | 'other'" faker:"oneof: execution_layer, consensus_layer, other"` + IsSubscribed bool `json:"is_subscribed"` +} + +type InternalPutUserNotificationSettingsClientResponse ApiDataResponse[NotificationSettingsClient] + type NotificationSettingsGeneral struct { DoNotDisturbTimestamp int64 `json:"do_not_disturb_timestamp"` // notifications are disabled until this timestamp IsEmailNotificationsEnabled bool `json:"is_email_notifications_enabled"` @@ -174,19 +185,14 @@ type NotificationSettingsGeneral struct { MachineCpuUsageThreshold float64 `json:"machine_cpu_usage_threshold" faker:"boundary_start=0, boundary_end=1"` IsMachineMemoryUsageSubscribed bool `json:"is_machine_memory_usage_subscribed"` MachineMemoryUsageThreshold float64 `json:"machine_memory_usage_threshold" faker:"boundary_start=0, boundary_end=1"` - - SubscribedClients []string `json:"subscribed_clients"` - IsRocketPoolNewRewardRoundSubscribed bool `json:"is_rocket_pool_new_reward_round_subscribed"` - IsRocketPoolMaxCollateralSubscribed bool `json:"is_rocket_pool_max_collateral_subscribed"` - RocketPoolMaxCollateralThreshold float64 `json:"rocket_pool_max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` - IsRocketPoolMinCollateralSubscribed bool `json:"is_rocket_pool_min_collateral_subscribed"` - RocketPoolMinCollateralThreshold float64 
`json:"rocket_pool_min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` } type InternalPutUserNotificationSettingsGeneralResponse ApiDataResponse[NotificationSettingsGeneral] type NotificationSettings struct { - GeneralSettings NotificationSettingsGeneral `json:"general_settings"` - Networks []NotificationNetwork `json:"networks"` - PairedDevices []NotificationPairedDevice `json:"paired_devices"` + GeneralSettings NotificationSettingsGeneral `json:"general_settings"` + HasMachines bool `json:"has_machines"` + Networks []NotificationNetwork `json:"networks"` + PairedDevices []NotificationPairedDevice `json:"paired_devices"` + Clients []NotificationSettingsClient `json:"clients" faker:"slice_len=10"` } type InternalGetUserNotificationSettingsResponse ApiDataResponse[NotificationSettings] @@ -204,6 +210,11 @@ type NotificationSettingsValidatorDashboard struct { IsSyncSubscribed bool `json:"is_sync_subscribed"` IsWithdrawalProcessedSubscribed bool `json:"is_withdrawal_processed_subscribed"` IsSlashedSubscribed bool `json:"is_slashed_subscribed"` + + IsMaxCollateralSubscribed bool `json:"is_max_collateral_subscribed"` + MaxCollateralThreshold float64 `json:"max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` + IsMinCollateralSubscribed bool `json:"is_min_collateral_subscribed"` + MinCollateralThreshold float64 `json:"min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` } type InternalPutUserNotificationSettingsValidatorDashboardResponse ApiDataResponse[NotificationSettingsValidatorDashboard] diff --git a/backend/pkg/blobindexer/blobindexer.go b/backend/pkg/blobindexer/blobindexer.go new file mode 100644 index 000000000..a690fea16 --- /dev/null +++ b/backend/pkg/blobindexer/blobindexer.go @@ -0,0 +1,470 @@ +package blobindexer + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "sync" + "time" + + "github.com/gobitfly/beaconchain/pkg/commons/db" + 
"github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/services" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/commons/version" + "github.com/gobitfly/beaconchain/pkg/consapi" + "go.uber.org/atomic" + + "github.com/gobitfly/beaconchain/pkg/consapi/network" + constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" + + "github.com/aws/aws-sdk-go-v2/aws" + awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" + "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/service/s3" + lru "github.com/hashicorp/golang-lru/v2" + "golang.org/x/sync/errgroup" +) + +var enableCheckingBeforePutting = false +var waitForOtherBlobIndexerDuration = time.Second * 60 + +type BlobIndexer struct { + S3Client *s3.Client + running bool + runningMu *sync.Mutex + clEndpoint string + cl consapi.Client + id string + networkID string + writtenBlobsCache *lru.Cache[string, bool] +} + +func NewBlobIndexer() (*BlobIndexer, error) { + initDB() + cfg, err := config.LoadDefaultConfig(context.TODO(), + config.WithCredentialsProvider(credentials.NewStaticCredentialsProvider( + utils.Config.BlobIndexer.S3.AccessKeyId, + utils.Config.BlobIndexer.S3.AccessKeySecret, + "", + )), + config.WithRegion("auto"), + ) + if err != nil { + return nil, err + } + s3Client := s3.NewFromConfig(cfg, func(o *s3.Options) { + o.UsePathStyle = true + o.BaseEndpoint = aws.String(utils.Config.BlobIndexer.S3.Endpoint) + }) + + writtenBlobsCache, err := lru.New[string, bool](1000) + if err != nil { + return nil, err + } + + id := utils.GetUUID() + bi := &BlobIndexer{ + S3Client: s3Client, + runningMu: &sync.Mutex{}, + clEndpoint: "http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port, + cl: consapi.NewClient("http://" + 
utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port), + id: id, + writtenBlobsCache: writtenBlobsCache, + } + return bi, nil +} + +func initDB() { + if utils.Config.BlobIndexer.DisableStatusReports { + return + } + if db.WriterDb != nil && db.ReaderDb != nil { + return + } + db.WriterDb, db.ReaderDb = db.MustInitDB(&types.DatabaseConfig{ + Username: utils.Config.WriterDatabase.Username, + Password: utils.Config.WriterDatabase.Password, + Name: utils.Config.WriterDatabase.Name, + Host: utils.Config.WriterDatabase.Host, + Port: utils.Config.WriterDatabase.Port, + MaxOpenConns: utils.Config.WriterDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.WriterDatabase.MaxIdleConns, + SSL: utils.Config.WriterDatabase.SSL, + }, &types.DatabaseConfig{ + Username: utils.Config.ReaderDatabase.Username, + Password: utils.Config.ReaderDatabase.Password, + Name: utils.Config.ReaderDatabase.Name, + Host: utils.Config.ReaderDatabase.Host, + Port: utils.Config.ReaderDatabase.Port, + MaxOpenConns: utils.Config.ReaderDatabase.MaxOpenConns, + MaxIdleConns: utils.Config.ReaderDatabase.MaxIdleConns, + SSL: utils.Config.ReaderDatabase.SSL, + }, "pgx", "postgres") +} + +func (bi *BlobIndexer) Start() { + bi.runningMu.Lock() + if bi.running { + bi.runningMu.Unlock() + return + } + bi.running = true + bi.runningMu.Unlock() + + log.InfoWithFields(log.Fields{"version": version.Version, "clEndpoint": bi.clEndpoint, "s3Endpoint": utils.Config.BlobIndexer.S3.Endpoint, "id": bi.id}, "starting blobindexer") + for { + err := bi.index() + if err != nil { + log.Error(err, "failed indexing blobs", 0) + } + time.Sleep(time.Second * 10) + } +} + +func (bi *BlobIndexer) index() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + + headHeader := &constypes.StandardBeaconHeaderResponse{} + finalizedHeader := &constypes.StandardBeaconHeaderResponse{} + spec := &constypes.StandardSpecResponse{} + + g, gCtx := errgroup.WithContext(ctx) + 
g.SetLimit(3) + g.Go(func() error { + var err error + spec, err = bi.cl.GetSpec() + if err != nil { + return fmt.Errorf("error bi.cl.GetSpec: %w", err) + } + return nil + }) + g.Go(func() error { + var err error + headHeader, err = bi.cl.GetBlockHeader("head") + if err != nil { + return fmt.Errorf("error bi.cl.GetBlockHeader(head): %w", err) + } + return nil + }) + g.Go(func() error { + var err error + finalizedHeader, err = bi.cl.GetBlockHeader("finalized") + if err != nil { + return fmt.Errorf("error bi.cl.GetBlockHeader(finalized): %w", err) + } + return nil + }) + err := g.Wait() + if err != nil { + return err + } + + if spec.Data.DenebForkEpoch == nil { + return fmt.Errorf("DENEB_FORK_EPOCH not set in spec") + } + if spec.Data.MinEpochsForBlobSidecarsRequests == nil { + return fmt.Errorf("MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS not set in spec") + } + + nodeDepositNetworkId := uint64(spec.Data.DepositNetworkID) + if utils.Config.Chain.ClConfig.DepositNetworkID != nodeDepositNetworkId { + return fmt.Errorf("config.DepositNetworkId != node.DepositNetworkId: %v != %v", utils.Config.Chain.ClConfig.DepositNetworkID, nodeDepositNetworkId) + } + bi.networkID = fmt.Sprintf("%d", nodeDepositNetworkId) + + status, err := bi.GetIndexerStatus() + if err != nil { + return fmt.Errorf("error bi.GetIndexerStatus: %w", err) + } + + // skip if another blobIndexer is already indexing - it is ok if multiple blobIndexers are indexing the same finalized slot, this is just best effort to avoid duplicate work + if status.CurrentBlobIndexerId != bi.id && status.LastUpdate.After(time.Now().Add(-waitForOtherBlobIndexerDuration)) { + log.InfoWithFields(log.Fields{"lastIndexedFinalizedSlot": status.LastIndexedFinalizedSlot, "currentBlobIndexerId": status.CurrentBlobIndexerId, "finalizedSlot": finalizedHeader.Data.Header.Message.Slot, "lastUpdate": status.LastUpdate}, "found other blobIndexer indexing, skipping") + return nil + } + + // check if node still has last indexed blobs (if its 
outside the range defined by MAX_REQUEST_BLOCKS_DENEB), otherwise assume that the node has pruned too far and we would miss blobs + minBlobSlotRange := *spec.Data.MinEpochsForBlobSidecarsRequests * uint64(spec.Data.SlotsPerEpoch) + minBlobSlot := uint64(0) + if headHeader.Data.Header.Message.Slot > minBlobSlotRange { + minBlobSlot = headHeader.Data.Header.Message.Slot - minBlobSlotRange + } + pruneMarginSlotRange := utils.Config.BlobIndexer.PruneMarginEpochs * uint64(spec.Data.SlotsPerEpoch) + if minBlobSlot > pruneMarginSlotRange { + minBlobSlot = minBlobSlot - pruneMarginSlotRange + } + if status.LastIndexedFinalizedSlot < minBlobSlot && status.LastIndexedFinalizedBlobSlot > 0 { + bs, err := bi.cl.GetBlobSidecars(status.LastIndexedFinalizedBlobSlot) + if err != nil { + return err + } + if len(bs.Data) == 0 { + return fmt.Errorf("no blobs found at lastIndexedFinalizedBlobSlot: %v, node has pruned too far?", status.LastIndexedFinalizedBlobSlot) + } + } + + lastIndexedFinalizedBlobSlot := atomic.NewUint64(status.LastIndexedFinalizedBlobSlot) + + denebForkSlot := *spec.Data.DenebForkEpoch * uint64(spec.Data.SlotsPerEpoch) + startSlot := status.LastIndexedFinalizedSlot + 1 + if status.LastIndexedFinalizedSlot <= denebForkSlot { + startSlot = denebForkSlot + } + + if headHeader.Data.Header.Message.Slot <= startSlot { + return fmt.Errorf("headHeader.Data.Header.Message.Slot <= startSlot: %v < %v (denebForkEpoch: %v, denebForkSlot: %v, slotsPerEpoch: %v)", headHeader.Data.Header.Message.Slot, startSlot, utils.Config.Chain.ClConfig.DenebForkEpoch, denebForkSlot, utils.Config.Chain.ClConfig.SlotsPerEpoch) + } + + start := time.Now() + log.InfoWithFields(log.Fields{ + "lastIndexedFinalizedSlot": status.LastIndexedFinalizedSlot, + "headSlot": headHeader.Data.Header.Message.Slot, + "finalizedSlot": finalizedHeader.Data.Header.Message.Slot, + "startSlot": startSlot, + "networkID": bi.networkID, + }, "indexing blobs") + defer func() { + log.InfoWithFields(log.Fields{ + 
"startSlot": startSlot, + "headSlot": headHeader.Data.Header.Message.Slot, + "finalizedSlot": finalizedHeader.Data.Header.Message.Slot, + "duration": time.Since(start), + "networkID": bi.networkID, + }, "finished indexing blobs") + }() + + batchSize := uint64(100) + for batchStart := startSlot; batchStart <= headHeader.Data.Header.Message.Slot; batchStart += batchSize { + batchStartTs := time.Now() + batchBlobsIndexed := atomic.NewInt64(0) + batchEnd := batchStart + batchSize + if batchEnd > headHeader.Data.Header.Message.Slot { + batchEnd = headHeader.Data.Header.Message.Slot + } + g, gCtx = errgroup.WithContext(context.Background()) + g.SetLimit(4) + for slot := batchStart; slot <= batchEnd; slot++ { + slot := slot + g.Go(func() error { + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + } + numBlobs, err := bi.indexBlobsAtSlot(slot) + if err != nil { + return fmt.Errorf("error bi.IndexBlobsAtSlot(%v): %w", slot, err) + } + if numBlobs > 0 && slot <= finalizedHeader.Data.Header.Message.Slot && slot > lastIndexedFinalizedBlobSlot.Load() { + lastIndexedFinalizedBlobSlot.Store(slot) + } + batchBlobsIndexed.Add(int64(numBlobs)) + return nil + }) + } + err = g.Wait() + if err != nil { + return err + } + lastIndexedFinalizedSlot := uint64(0) + if batchEnd <= finalizedHeader.Data.Header.Message.Slot { + lastIndexedFinalizedSlot = batchEnd + } else { + lastIndexedFinalizedSlot = finalizedHeader.Data.Header.Message.Slot + } + newBlobIndexerStatus := BlobIndexerStatus{ + LastIndexedFinalizedSlot: lastIndexedFinalizedSlot, + LastIndexedFinalizedBlobSlot: lastIndexedFinalizedBlobSlot.Load(), + CurrentBlobIndexerId: bi.id, + LastUpdate: time.Now(), + BlobIndexerVersion: version.Version, + } + if status.LastIndexedFinalizedBlobSlot > newBlobIndexerStatus.LastIndexedFinalizedBlobSlot { + newBlobIndexerStatus.LastIndexedFinalizedBlobSlot = status.LastIndexedFinalizedBlobSlot + } + err := bi.putIndexerStatus(newBlobIndexerStatus) + if err != nil { + return 
fmt.Errorf("error updating indexer status at slot %v: %w", batchEnd, err) + } + slotsPerSecond := float64(batchEnd-batchStart) / time.Since(batchStartTs).Seconds() + blobsPerSecond := float64(batchBlobsIndexed.Load()) / time.Since(batchStartTs).Seconds() + estimatedTimeToHead := float64(headHeader.Data.Header.Message.Slot-batchStart) / slotsPerSecond + estimatedTimeToHeadDuration := time.Duration(estimatedTimeToHead) * time.Second + log.InfoWithFields(log.Fields{ + "lastIdxFinSlot": newBlobIndexerStatus.LastIndexedFinalizedSlot, + "lastIdxFinBlobSlot": newBlobIndexerStatus.LastIndexedFinalizedBlobSlot, + "batch": fmt.Sprintf("%d-%d", batchStart, batchEnd), + "duration": time.Since(batchStartTs), + "slotsPerSecond": fmt.Sprintf("%.3f", slotsPerSecond), + "blobsPerSecond": fmt.Sprintf("%.3f", blobsPerSecond), + "estimatedTimeToHead": estimatedTimeToHeadDuration, + "blobsIndexed": batchBlobsIndexed.Load(), + }, "updated indexer status") + if !utils.Config.BlobIndexer.DisableStatusReports { + services.ReportStatus("blobindexer", "Running", nil) + } + } + return nil +} + +func (bi *BlobIndexer) indexBlobsAtSlot(slot uint64) (int, error) { + tGetBlobSidcar := time.Now() + + blobSidecar, err := bi.cl.GetBlobSidecars(slot) + if err != nil { + httpErr := network.SpecificError(err) + if httpErr != nil && httpErr.StatusCode == http.StatusNotFound { + // no sidecar for this slot + return 0, nil + } + return 0, err + } + metrics.TaskDuration.WithLabelValues("blobindexer_get_blob_sidecars").Observe(time.Since(tGetBlobSidcar).Seconds()) + + if len(blobSidecar.Data) <= 0 { + return 0, nil + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*20) + defer cancel() + + g, gCtx := errgroup.WithContext(ctx) + g.SetLimit(4) + for _, d := range blobSidecar.Data { + d := d + versionedBlobHash := fmt.Sprintf("%#x", utils.VersionedBlobHash(d.KzgCommitment).Bytes()) + key := fmt.Sprintf("%s/blobs/%s", bi.networkID, versionedBlobHash) + + if 
bi.writtenBlobsCache.Contains(key) { + continue + } + + g.Go(func() error { + select { + case <-gCtx.Done(): + return gCtx.Err() + default: + } + + if enableCheckingBeforePutting { + tS3HeadObj := time.Now() + _, err = bi.S3Client.HeadObject(gCtx, &s3.HeadObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + }) + metrics.TaskDuration.WithLabelValues("blobindexer_check_blob").Observe(time.Since(tS3HeadObj).Seconds()) + if err != nil { + // Only put the object if it does not exist yet + var httpResponseErr *awshttp.ResponseError + if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == http.StatusNotFound || httpResponseErr.HTTPStatusCode() == 403) { + return nil + } + return fmt.Errorf("error getting headObject: %s (%v/%v): %w", key, d.SignedBlockHeader.Message.Slot, d.Index, err) + } + } + + tS3PutObj := time.Now() + _, putErr := bi.S3Client.PutObject(gCtx, &s3.PutObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + Body: bytes.NewReader(d.Blob), + Metadata: map[string]string{ + "blob_index": fmt.Sprintf("%d", d.Index), + "block_slot": fmt.Sprintf("%d", d.SignedBlockHeader.Message.Slot), + "block_proposer": fmt.Sprintf("%d", d.SignedBlockHeader.Message.ProposerIndex), + "block_state_root": d.SignedBlockHeader.Message.StateRoot.String(), + "block_parent_root": d.SignedBlockHeader.Message.ParentRoot.String(), + "block_body_root": d.SignedBlockHeader.Message.BodyRoot.String(), + "kzg_commitment": d.KzgCommitment.String(), + "kzg_proof": d.KzgProof.String(), + }, + }) + metrics.TaskDuration.WithLabelValues("blobindexer_put_blob").Observe(time.Since(tS3PutObj).Seconds()) + if putErr != nil { + return fmt.Errorf("error putting object: %s (%v/%v): %w", key, d.SignedBlockHeader.Message.Slot, d.Index, putErr) + } + bi.writtenBlobsCache.Add(key, true) + + return nil + }) + } + err = g.Wait() + if err != nil { + return len(blobSidecar.Data), fmt.Errorf("error indexing blobs at slot %v: %w", slot, err) + } + + 
return len(blobSidecar.Data), nil +} + +func (bi *BlobIndexer) GetIndexerStatus() (*BlobIndexerStatus, error) { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("blobindexer_get_indexer_status").Observe(time.Since(start).Seconds()) + }() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + key := fmt.Sprintf("%s/blob-indexer-status.json", bi.networkID) + obj, err := bi.S3Client.GetObject(ctx, &s3.GetObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + }) + if err != nil { + // If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. + var httpResponseErr *awshttp.ResponseError + if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == 404 || httpResponseErr.HTTPStatusCode() == 403) { + return &BlobIndexerStatus{}, nil + } + return nil, err + } + status := &BlobIndexerStatus{} + err = json.NewDecoder(obj.Body).Decode(status) + return status, err +} + +func (bi *BlobIndexer) putIndexerStatus(status BlobIndexerStatus) error { + start := time.Now() + defer func() { + metrics.TaskDuration.WithLabelValues("blobindexer_put_indexer_status").Observe(time.Since(start).Seconds()) + }() + ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) + defer cancel() + key := fmt.Sprintf("%s/blob-indexer-status.json", bi.networkID) + contentType := "application/json" + body, err := json.Marshal(&status) + if err != nil { + return err + } + _, err = bi.S3Client.PutObject(ctx, &s3.PutObjectInput{ + Bucket: &utils.Config.BlobIndexer.S3.Bucket, + Key: &key, + Body: bytes.NewReader(body), + ContentType: &contentType, + Metadata: map[string]string{ + 
"last_indexed_finalized_slot": fmt.Sprintf("%d", status.LastIndexedFinalizedSlot), + "last_indexed_finalized_blob_slot": fmt.Sprintf("%d", status.LastIndexedFinalizedBlobSlot), + "current_blob_indexer_id": status.CurrentBlobIndexerId, + "last_update": status.LastUpdate.Format(time.RFC3339), + "blob_indexer_version": status.BlobIndexerVersion, + }, + }) + if err != nil { + return err + } + return nil +} + +type BlobIndexerStatus struct { + LastIndexedFinalizedSlot uint64 `json:"last_indexed_finalized_slot"` // last finalized slot that was indexed + LastIndexedFinalizedBlobSlot uint64 `json:"last_indexed_finalized_blob_slot"` // last finalized slot that included a blob + CurrentBlobIndexerId string `json:"current_blob_indexer_id"` + LastUpdate time.Time `json:"last_update"` + BlobIndexerVersion string `json:"blob_indexer_version"` +} diff --git a/backend/pkg/blobindexer/blobs.go b/backend/pkg/blobindexer/blobs.go deleted file mode 100644 index ae99d2d1b..000000000 --- a/backend/pkg/blobindexer/blobs.go +++ /dev/null @@ -1,333 +0,0 @@ -package blobindexer - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "sync" - "time" - - "github.com/gobitfly/beaconchain/pkg/commons/log" - "github.com/gobitfly/beaconchain/pkg/commons/metrics" - "github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/gobitfly/beaconchain/pkg/commons/version" - "github.com/gobitfly/beaconchain/pkg/consapi" - - "github.com/gobitfly/beaconchain/pkg/consapi/network" - constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" - - "github.com/aws/aws-sdk-go-v2/aws" - awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/coocood/freecache" - "golang.org/x/sync/errgroup" -) - -type BlobIndexer struct { - S3Client *s3.Client - running bool - runningMu *sync.Mutex - clEndpoint string - cache *freecache.Cache - cl consapi.Client -} - -func NewBlobIndexer() 
(*BlobIndexer, error) { - s3Resolver := aws.EndpointResolverWithOptionsFunc(func(service, region string, options ...interface{}) (aws.Endpoint, error) { - return aws.Endpoint{ - PartitionID: "aws", - URL: utils.Config.BlobIndexer.S3.Endpoint, - SigningRegion: "us-east-2", - HostnameImmutable: true, - }, nil - }) - s3Client := s3.NewFromConfig(aws.Config{ - Region: "us-east-2", - Credentials: credentials.NewStaticCredentialsProvider( - utils.Config.BlobIndexer.S3.AccessKeyId, - utils.Config.BlobIndexer.S3.AccessKeySecret, - "", - ), - EndpointResolverWithOptions: s3Resolver, - }, func(o *s3.Options) { - o.UsePathStyle = true - }) - bi := &BlobIndexer{ - S3Client: s3Client, - runningMu: &sync.Mutex{}, - clEndpoint: "http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port, - cache: freecache.NewCache(1024 * 1024), - cl: consapi.NewClient("http://" + utils.Config.Indexer.Node.Host + ":" + utils.Config.Indexer.Node.Port), - } - return bi, nil -} - -func (bi *BlobIndexer) Start() { - bi.runningMu.Lock() - if bi.running { - bi.runningMu.Unlock() - return - } - bi.running = true - bi.runningMu.Unlock() - - log.InfoWithFields(log.Fields{"version": version.Version, "clEndpoint": bi.clEndpoint, "s3Endpoint": utils.Config.BlobIndexer.S3.Endpoint}, "starting blobindexer") - for { - err := bi.Index() - if err != nil { - log.Error(err, "failed indexing blobs", 0) - } - time.Sleep(time.Second * 10) - } -} - -func (bi *BlobIndexer) Index() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - headHeader := &constypes.StandardBeaconHeaderResponse{} - finalizedHeader := &constypes.StandardBeaconHeaderResponse{} - spec := &constypes.StandardSpecResponse{} - - g, gCtx := errgroup.WithContext(ctx) - g.SetLimit(3) - g.Go(func() error { - var err error - spec, err = bi.cl.GetSpec() - if err != nil { - return err - } - return nil - }) - g.Go(func() error { - var err error - headHeader, err = 
bi.cl.GetBlockHeader("head") - if err != nil { - return err - } - return nil - }) - g.Go(func() error { - var err error - finalizedHeader, err = bi.cl.GetBlockHeader("finalized") - if err != nil { - return err - } - return nil - }) - err := g.Wait() - if err != nil { - return err - } - - nodeDepositNetworkId := uint64(spec.Data.DepositNetworkID) - if utils.Config.Chain.ClConfig.DepositNetworkID != nodeDepositNetworkId { - return fmt.Errorf("config.DepositNetworkId != node.DepositNetworkId: %v != %v", utils.Config.Chain.ClConfig.DepositNetworkID, nodeDepositNetworkId) - } - - status, err := bi.GetIndexerStatus() - if err != nil { - return err - } - - denebForkSlot := utils.Config.Chain.ClConfig.DenebForkEpoch * utils.Config.Chain.ClConfig.SlotsPerEpoch - startSlot := status.LastIndexedFinalizedSlot + 1 - if status.LastIndexedFinalizedSlot <= denebForkSlot { - startSlot = denebForkSlot - } - - if headHeader.Data.Header.Message.Slot <= startSlot { - return fmt.Errorf("headHeader.Data.Header.Message.Slot <= startSlot: %v < %v", headHeader.Data.Header.Message.Slot, startSlot) - } - - start := time.Now() - log.InfoWithFields(log.Fields{"lastIndexedFinalizedSlot": status.LastIndexedFinalizedSlot, "headSlot": headHeader.Data.Header.Message.Slot}, "indexing blobs") - defer func() { - log.InfoWithFields(log.Fields{ - "startSlot": startSlot, - "endSlot": headHeader.Data.Header.Message.Slot, - "duration": time.Since(start), - }, "finished indexing blobs") - }() - - batchSize := uint64(100) - for batchStart := startSlot; batchStart <= headHeader.Data.Header.Message.Slot; batchStart += batchSize { - batchEnd := batchStart + batchSize - if batchEnd > headHeader.Data.Header.Message.Slot { - batchEnd = headHeader.Data.Header.Message.Slot - } - g, gCtx = errgroup.WithContext(context.Background()) - g.SetLimit(4) - for slot := batchStart; slot <= batchEnd; slot++ { - slot := slot - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - } - err := 
bi.IndexBlobsAtSlot(slot) - if err != nil { - return err - } - return nil - }) - } - err = g.Wait() - if err != nil { - return err - } - if batchEnd <= finalizedHeader.Data.Header.Message.Slot { - err := bi.PutIndexerStatus(BlobIndexerStatus{ - LastIndexedFinalizedSlot: batchEnd, - }) - if err != nil { - return fmt.Errorf("error updating indexer status at slot %v: %w", batchEnd, err) - } - log.InfoWithFields(log.Fields{"lastIndexedFinalizedSlot": batchEnd}, "updated indexer status") - } - } - return nil -} - -func (bi *BlobIndexer) IndexBlobsAtSlot(slot uint64) error { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - - tGetBlobSidcar := time.Now() - - blobSidecar, err := bi.cl.GetBlobSidecars(slot) - if err != nil { - httpErr := network.SpecificError(err) - if httpErr != nil && httpErr.StatusCode == http.StatusNotFound { - // no sidecar for this slot - return nil - } - return err - } - metrics.TaskDuration.WithLabelValues("blobindexer_get_blob_sidecars").Observe(time.Since(tGetBlobSidcar).Seconds()) - - if len(blobSidecar.Data) <= 0 { - return nil - } - - g, gCtx := errgroup.WithContext(ctx) - g.SetLimit(4) - for _, d := range blobSidecar.Data { - d := d - g.Go(func() error { - select { - case <-gCtx.Done(): - return gCtx.Err() - default: - } - - versionedBlobHash := fmt.Sprintf("%#x", utils.VersionedBlobHash(d.KzgCommitment).Bytes()) - key := fmt.Sprintf("blobs/%s", versionedBlobHash) - - tS3HeadObj := time.Now() - _, err = bi.S3Client.HeadObject(gCtx, &s3.HeadObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - }) - metrics.TaskDuration.WithLabelValues("blobindexer_check_blob").Observe(time.Since(tS3HeadObj).Seconds()) - if err != nil { - // Only put the object if it does not exist yet - var httpResponseErr *awshttp.ResponseError - if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == http.StatusNotFound || httpResponseErr.HTTPStatusCode() == 403) { - tS3PutObj := time.Now() 
- _, putErr := bi.S3Client.PutObject(gCtx, &s3.PutObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - Body: bytes.NewReader(d.Blob), - Metadata: map[string]string{ - "slot": fmt.Sprintf("%d", d.Slot), - "index": fmt.Sprintf("%d", d.Index), - "block_root": d.BlockRoot.String(), - "block_parent_root": d.BlockParentRoot.String(), - "proposer_index": fmt.Sprintf("%d", d.ProposerIndex), - "kzg_commitment": d.KzgCommitment.String(), - "kzg_proof": d.KzgProof.String(), - }, - }) - metrics.TaskDuration.WithLabelValues("blobindexer_put_blob").Observe(time.Since(tS3PutObj).Seconds()) - if putErr != nil { - return fmt.Errorf("error putting object: %s (%v/%v): %w", key, d.Slot, d.Index, putErr) - } - return nil - } - return fmt.Errorf("error getting headObject: %s (%v/%v): %w", key, d.Slot, d.Index, err) - } - return nil - }) - } - err = g.Wait() - if err != nil { - return fmt.Errorf("error indexing blobs at slot %v: %w", slot, err) - } - - return nil -} - -func (bi *BlobIndexer) GetIndexerStatus() (*BlobIndexerStatus, error) { - start := time.Now() - defer func() { - metrics.TaskDuration.WithLabelValues("blobindexer_get_indexer_status").Observe(time.Since(start).Seconds()) - }() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - key := "blob-indexer-status.json" - obj, err := bi.S3Client.GetObject(ctx, &s3.GetObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - }) - if err != nil { - // If the object that you request doesn’t exist, the error that Amazon S3 returns depends on whether you also have the s3:ListBucket permission. If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 (Not Found) error. If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error. 
- var httpResponseErr *awshttp.ResponseError - if errors.As(err, &httpResponseErr) && (httpResponseErr.HTTPStatusCode() == 404 || httpResponseErr.HTTPStatusCode() == 403) { - return &BlobIndexerStatus{}, nil - } - return nil, err - } - status := &BlobIndexerStatus{} - err = json.NewDecoder(obj.Body).Decode(status) - return status, err -} - -func (bi *BlobIndexer) PutIndexerStatus(status BlobIndexerStatus) error { - start := time.Now() - defer func() { - metrics.TaskDuration.WithLabelValues("blobindexer_put_indexer_status").Observe(time.Since(start).Seconds()) - }() - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - defer cancel() - key := "blob-indexer-status.json" - contentType := "application/json" - body, err := json.Marshal(&status) - if err != nil { - return err - } - _, err = bi.S3Client.PutObject(ctx, &s3.PutObjectInput{ - Bucket: &utils.Config.BlobIndexer.S3.Bucket, - Key: &key, - Body: bytes.NewReader(body), - ContentType: &contentType, - Metadata: map[string]string{ - "last_indexed_finalized_slot": fmt.Sprintf("%d", status.LastIndexedFinalizedSlot), - }, - }) - if err != nil { - return err - } - return nil -} - -type BlobIndexerStatus struct { - LastIndexedFinalizedSlot uint64 `json:"last_indexed_finalized_slot"` - // LastIndexedFinalizedRoot string `json:"last_indexed_finalized_root"` - // IndexedUnfinalized map[string]uint64 `json:"indexed_unfinalized"` -} diff --git a/backend/pkg/commons/db/bigtable.go b/backend/pkg/commons/db/bigtable.go index edd9b3c17..7b7cd147e 100644 --- a/backend/pkg/commons/db/bigtable.go +++ b/backend/pkg/commons/db/bigtable.go @@ -194,7 +194,7 @@ func (bigtable *Bigtable) GetClient() *gcp_bigtable.Client { return bigtable.client } -func (bigtable *Bigtable) SaveMachineMetric(process string, userID uint64, machine string, data []byte) error { +func (bigtable *Bigtable) SaveMachineMetric(process string, userID types.UserId, machine string, data []byte) error { ctx, cancel := 
context.WithTimeout(context.Background(), time.Second*30) defer cancel() @@ -234,7 +234,7 @@ func (bigtable *Bigtable) SaveMachineMetric(process string, userID uint64, machi return nil } -func (bigtable Bigtable) getMachineMetricNamesMap(userID uint64, searchDepth int) (map[string]bool, error) { +func (bigtable Bigtable) getMachineMetricNamesMap(userID types.UserId, searchDepth int) (map[string]bool, error) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30)) defer cancel() @@ -265,7 +265,7 @@ func (bigtable Bigtable) getMachineMetricNamesMap(userID uint64, searchDepth int return machineNames, nil } -func (bigtable Bigtable) GetMachineMetricsMachineNames(userID uint64) ([]string, error) { +func (bigtable Bigtable) GetMachineMetricsMachineNames(userID types.UserId) ([]string, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -288,7 +288,7 @@ func (bigtable Bigtable) GetMachineMetricsMachineNames(userID uint64) ([]string, return result, nil } -func (bigtable Bigtable) GetMachineMetricsMachineCount(userID uint64) (uint64, error) { +func (bigtable Bigtable) GetMachineMetricsMachineCount(userID types.UserId) (uint64, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -310,7 +310,7 @@ func (bigtable Bigtable) GetMachineMetricsMachineCount(userID uint64) (uint64, e return uint64(card), nil } -func (bigtable Bigtable) GetMachineMetricsNode(userID uint64, limit, offset int) ([]*types.MachineMetricNode, error) { +func (bigtable Bigtable) GetMachineMetricsNode(userID types.UserId, limit, offset int) ([]*types.MachineMetricNode, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -335,7 +335,7 @@ func (bigtable Bigtable) GetMachineMetricsNode(userID uint64, limit, offset int) ) } -func (bigtable Bigtable) GetMachineMetricsValidator(userID uint64, limit, offset int) 
([]*types.MachineMetricValidator, error) { +func (bigtable Bigtable) GetMachineMetricsValidator(userID types.UserId, limit, offset int) ([]*types.MachineMetricValidator, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -360,7 +360,7 @@ func (bigtable Bigtable) GetMachineMetricsValidator(userID uint64, limit, offset ) } -func (bigtable Bigtable) GetMachineMetricsSystem(userID uint64, limit, offset int) ([]*types.MachineMetricSystem, error) { +func (bigtable Bigtable) GetMachineMetricsSystem(userID types.UserId, limit, offset int) ([]*types.MachineMetricSystem, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -385,7 +385,7 @@ func (bigtable Bigtable) GetMachineMetricsSystem(userID uint64, limit, offset in ) } -func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | types.MachineMetricValidator](bigtable Bigtable, process string, userID uint64, limit, offset int, marshler func(data []byte, machine string) *T) ([]*T, error) { +func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | types.MachineMetricValidator](bigtable Bigtable, process string, userID types.UserId, limit, offset int, marshler func(data []byte, machine string) *T) ([]*T, error) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30)) defer cancel() @@ -429,7 +429,7 @@ func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | t return res, nil } -func (bigtable Bigtable) GetMachineRowKey(userID uint64, process string, machine string) string { +func (bigtable Bigtable) GetMachineRowKey(userID types.UserId, process string, machine string) string { return fmt.Sprintf("u:%s:p:%s:m:%s", bigtable.reversePaddedUserID(userID), process, machine) } @@ -437,7 +437,7 @@ func (bigtable Bigtable) GetMachineRowKey(userID uint64, process string, machine // machineData contains the latest machine data in 
CurrentData // and 5 minute old data in fiveMinuteOldData (defined in limit) // as well as the insert timestamps of both -func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable.RowList) (map[uint64]map[string]*types.MachineMetricSystemUser, error) { +func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable.RowList) (map[types.UserId]map[string]*types.MachineMetricSystemUser, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "rowKeys": rowKeys, @@ -449,7 +449,7 @@ func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable. ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*200)) defer cancel() - res := make(map[uint64]map[string]*types.MachineMetricSystemUser) // userID -> machine -> data + res := make(map[types.UserId]map[string]*types.MachineMetricSystemUser) // userID -> machine -> data limit := 5 @@ -509,7 +509,7 @@ func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable. 
} //nolint:unparam -func machineMetricRowParts(r string) (bool, uint64, string, string) { +func machineMetricRowParts(r string) (bool, types.UserId, string, string) { keySplit := strings.Split(r, ":") userID, err := strconv.ParseUint(keySplit[1], 10, 64) @@ -526,7 +526,7 @@ func machineMetricRowParts(r string) (bool, uint64, string, string) { process := keySplit[3] - return true, userID, machine, process + return true, types.UserId(userID), machine, process } func (bigtable *Bigtable) SaveValidatorBalances(epoch uint64, validators []*types.Validator) error { @@ -2678,8 +2678,8 @@ func GetCurrentDayClIncome(validator_indices []uint64) (map[uint64]int64, error) return dayIncome, nil } -func (bigtable *Bigtable) reversePaddedUserID(userID uint64) string { - return fmt.Sprintf("%09d", ^uint64(0)-userID) +func (bigtable *Bigtable) reversePaddedUserID(userID types.UserId) string { + return fmt.Sprintf("%09d", ^uint64(0)-uint64(userID)) } func (bigtable *Bigtable) reversedPaddedEpoch(epoch uint64) string { diff --git a/backend/pkg/commons/db/db.go b/backend/pkg/commons/db/db.go index bc1489514..929728950 100644 --- a/backend/pkg/commons/db/db.go +++ b/backend/pkg/commons/db/db.go @@ -957,22 +957,8 @@ func GetTotalEligibleEther() (uint64, error) { } // GetValidatorsGotSlashed returns the validators that got slashed after `epoch` either by an attestation violation or a proposer violation -func GetValidatorsGotSlashed(epoch uint64) ([]struct { - Epoch uint64 `db:"epoch"` - SlasherIndex uint64 `db:"slasher"` - SlasherPubkey string `db:"slasher_pubkey"` - SlashedValidatorIndex uint64 `db:"slashedvalidator"` - SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` - Reason string `db:"reason"` -}, error) { - var dbResult []struct { - Epoch uint64 `db:"epoch"` - SlasherIndex uint64 `db:"slasher"` - SlasherPubkey string `db:"slasher_pubkey"` - SlashedValidatorIndex uint64 `db:"slashedvalidator"` - SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` - Reason string 
`db:"reason"` - } +func GetValidatorsGotSlashed(epoch uint64) ([]*types.SlashingInfo, error) { + var dbResult []*types.SlashingInfo err := ReaderDb.Select(&dbResult, ` WITH slashings AS ( diff --git a/backend/pkg/commons/db/subscriptions.go b/backend/pkg/commons/db/subscriptions.go index 3edbc3d5f..639f70c8b 100644 --- a/backend/pkg/commons/db/subscriptions.go +++ b/backend/pkg/commons/db/subscriptions.go @@ -261,8 +261,8 @@ func GetSubscriptions(filter GetSubscriptionsFilter) ([]*types.Subscription, err } // UpdateSubscriptionsLastSent updates `last_sent_ts` column of the `users_subscriptions` table. -func UpdateSubscriptionsLastSent(subscriptionIDs []uint64, sent time.Time, epoch uint64, useDB *sqlx.DB) error { - _, err := useDB.Exec(` +func UpdateSubscriptionsLastSent(subscriptionIDs []uint64, sent time.Time, epoch uint64) error { + _, err := FrontendWriterDB.Exec(` UPDATE users_subscriptions SET last_sent_ts = TO_TIMESTAMP($1), last_sent_epoch = $2 WHERE id = ANY($3)`, sent.Unix(), epoch, pq.Array(subscriptionIDs)) diff --git a/backend/pkg/commons/log/log.go b/backend/pkg/commons/log/log.go index 60684f6cb..15f27eb10 100644 --- a/backend/pkg/commons/log/log.go +++ b/backend/pkg/commons/log/log.go @@ -14,17 +14,17 @@ import ( // Fatal logs a fatal error with callstack info that skips callerSkip many levels with arbitrarily many additional infos. // callerSkip equal to 0 gives you info directly where Fatal is called. func Fatal(err error, errorMsg interface{}, callerSkip int, additionalInfos ...Fields) { - logErrorInfo(err, callerSkip, additionalInfos...).Fatal(errorMsg) + logErrorInfo(err, callerSkip, false, additionalInfos...).Fatal(errorMsg) } // Error logs an error with callstack info that skips callerSkip many levels with arbitrarily many additional infos. // callerSkip equal to 0 gives you info directly where Error is called. 
func Error(err error, errorMsg interface{}, callerSkip int, additionalInfos ...Fields) { - logErrorInfo(err, callerSkip, additionalInfos...).Error(errorMsg) + logErrorInfo(err, callerSkip, false, additionalInfos...).Error(errorMsg) } func WarnWithStackTrace(err error, errorMsg interface{}, callerSkip int, additionalInfos ...Fields) { - logErrorInfo(err, callerSkip, additionalInfos...).Warn(errorMsg) + logErrorInfo(err, callerSkip, true, additionalInfos...).Warn(errorMsg) } func Info(args ...interface{}) { @@ -67,7 +67,7 @@ func Debugf(format string, args ...interface{}) { logrus.Debugf(format, args...) } -func logErrorInfo(err error, callerSkip int, additionalInfos ...Fields) *logrus.Entry { +func logErrorInfo(err error, callerSkip int, isWarning bool, additionalInfos ...Fields) *logrus.Entry { logFields := logrus.NewEntry(logrus.New()) metricName := "unknown" @@ -88,7 +88,9 @@ func logErrorInfo(err error, callerSkip int, additionalInfos ...Fields) *logrus. if len(metricName) > 30 { metricName = metricName[len(metricName)-30:] } - metrics.Errors.WithLabelValues(metricName).Inc() + if !isWarning { + metrics.Errors.WithLabelValues(metricName).Inc() + } errColl := []string{} for { diff --git a/backend/pkg/commons/types/config.go b/backend/pkg/commons/types/config.go index 5518cb131..9668cfd02 100644 --- a/backend/pkg/commons/types/config.go +++ b/backend/pkg/commons/types/config.go @@ -61,11 +61,13 @@ type Config struct { } `yaml:"bigtable"` BlobIndexer struct { S3 struct { - Endpoint string `yaml:"endpoint" envconfig:"BLOB_INDEXER_S3_ENDPOINT"` - Bucket string `yaml:"bucket" envconfig:"BLOB_INDEXER_S3_BUCKET"` - AccessKeyId string `yaml:"accessKeyId" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_ID"` - AccessKeySecret string `yaml:"accessKeySecret" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_SECRET"` + Endpoint string `yaml:"endpoint" envconfig:"BLOB_INDEXER_S3_ENDPOINT"` // s3 endpoint + Bucket string `yaml:"bucket" envconfig:"BLOB_INDEXER_S3_BUCKET"` // s3 bucket + AccessKeyId 
string `yaml:"accessKeyId" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_ID"` // s3 access key id + AccessKeySecret string `yaml:"accessKeySecret" envconfig:"BLOB_INDEXER_S3_ACCESS_KEY_SECRET"` // s3 access key secret } `yaml:"s3"` + PruneMarginEpochs uint64 `yaml:"pruneMarginEpochs" envconfig:"BLOB_INDEXER_PRUNE_MARGIN_EPOCHS"` // PruneMarginEpochs helps blobindexer to decide if connected node has pruned too far to have no holes in the data, set it to same value as lighthouse flag --blob-prune-margin-epochs + DisableStatusReports bool `yaml:"disableStatusReports" envconfig:"BLOB_INDEXER_DISABLE_STATUS_REPORTS"` // disable status reports (no connection to db needed) } `yaml:"blobIndexer"` Chain struct { Name string `yaml:"name" envconfig:"CHAIN_NAME"` diff --git a/backend/pkg/commons/types/exporter.go b/backend/pkg/commons/types/exporter.go index 5a3f3a397..197638472 100644 --- a/backend/pkg/commons/types/exporter.go +++ b/backend/pkg/commons/types/exporter.go @@ -709,3 +709,12 @@ type RedisCachedValidatorsMapping struct { Epoch Epoch Mapping []*CachedValidator } + +type SlashingInfo struct { + Epoch uint64 `db:"epoch"` + SlasherIndex uint64 `db:"slasher"` + SlasherPubkey string `db:"slasher_pubkey"` + SlashedValidatorIndex uint64 `db:"slashedvalidator"` + SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` + Reason string `db:"reason"` +} diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index ee589fcbb..5a8485c24 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -4,14 +4,17 @@ import ( "database/sql" "database/sql/driver" "encoding/json" + "fmt" "html/template" "math/big" "strings" "time" - "firebase.google.com/go/messaging" + "firebase.google.com/go/v4/messaging" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/consapi/types" 
"github.com/lib/pq" "github.com/pkg/errors" "golang.org/x/text/cases" @@ -19,31 +22,53 @@ import ( ) type EventName string +type EventFilter string + +type NotificationsPerUserId map[UserId]map[EventName]map[EventFilter]Notification + +func (npui NotificationsPerUserId) AddNotification(n Notification) { + if n.GetUserId() == 0 { + log.Fatal(fmt.Errorf("Notification user id is 0"), fmt.Sprintf("Notification: %v", n), 1) + } + if n.GetEventName() == "" { + log.Fatal(fmt.Errorf("Notification event name is empty"), fmt.Sprintf("Notification: %v", n), 1) + } + // next check is disabled as there are events that do not require a filter (rocketpool, network events) + // if n.GetEventFilter() == "" { + // log.Fatal(fmt.Errorf("Notification event filter is empty"), fmt.Sprintf("Notification: %v", n), 0) + // } + + if _, ok := npui[n.GetUserId()]; !ok { + npui[n.GetUserId()] = make(map[EventName]map[EventFilter]Notification) + } + if _, ok := npui[n.GetUserId()][n.GetEventName()]; !ok { + npui[n.GetUserId()][n.GetEventName()] = make(map[EventFilter]Notification) + } + npui[n.GetUserId()][n.GetEventName()][EventFilter(n.GetEventFilter())] = n +} const ( - ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" - ValidatorMissedProposalEventName EventName = "validator_proposal_missed" - ValidatorExecutedProposalEventName EventName = "validator_proposal_submitted" - ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" - ValidatorGotSlashedEventName EventName = "validator_got_slashed" - ValidatorDidSlashEventName EventName = "validator_did_slash" - ValidatorIsOfflineEventName EventName = "validator_is_offline" - ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" - ValidatorReceivedDepositEventName EventName = "validator_received_deposit" - NetworkSlashingEventName EventName = "network_slashing" - NetworkValidatorActivationQueueFullEventName EventName = "network_validator_activation_queue_full" - 
NetworkValidatorActivationQueueNotFullEventName EventName = "network_validator_activation_queue_not_full" - NetworkValidatorExitQueueFullEventName EventName = "network_validator_exit_queue_full" - NetworkValidatorExitQueueNotFullEventName EventName = "network_validator_exit_queue_not_full" - NetworkLivenessIncreasedEventName EventName = "network_liveness_increased" - EthClientUpdateEventName EventName = "eth_client_update" - MonitoringMachineOfflineEventName EventName = "monitoring_machine_offline" - MonitoringMachineDiskAlmostFullEventName EventName = "monitoring_hdd_almostfull" - MonitoringMachineCpuLoadEventName EventName = "monitoring_cpu_load" - MonitoringMachineMemoryUsageEventName EventName = "monitoring_memory_usage" - MonitoringMachineSwitchedToETH2FallbackEventName EventName = "monitoring_fallback_eth2inuse" - MonitoringMachineSwitchedToETH1FallbackEventName EventName = "monitoring_fallback_eth1inuse" - TaxReportEventName EventName = "user_tax_report" + ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" + ValidatorMissedProposalEventName EventName = "validator_proposal_missed" + ValidatorExecutedProposalEventName EventName = "validator_proposal_submitted" + ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" + ValidatorGotSlashedEventName EventName = "validator_got_slashed" + ValidatorDidSlashEventName EventName = "validator_did_slash" + ValidatorIsOfflineEventName EventName = "validator_is_offline" + ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" + ValidatorReceivedDepositEventName EventName = "validator_received_deposit" + NetworkSlashingEventName EventName = "network_slashing" + NetworkValidatorActivationQueueFullEventName EventName = "network_validator_activation_queue_full" + NetworkValidatorActivationQueueNotFullEventName EventName = "network_validator_activation_queue_not_full" + NetworkValidatorExitQueueFullEventName EventName = "network_validator_exit_queue_full" + 
NetworkValidatorExitQueueNotFullEventName EventName = "network_validator_exit_queue_not_full" + NetworkLivenessIncreasedEventName EventName = "network_liveness_increased" + EthClientUpdateEventName EventName = "eth_client_update" + MonitoringMachineOfflineEventName EventName = "monitoring_machine_offline" + MonitoringMachineDiskAlmostFullEventName EventName = "monitoring_hdd_almostfull" + MonitoringMachineCpuLoadEventName EventName = "monitoring_cpu_load" + MonitoringMachineMemoryUsageEventName EventName = "monitoring_memory_usage" + TaxReportEventName EventName = "user_tax_report" //nolint:misspell RocketpoolCommissionThresholdEventName EventName = "rocketpool_commision_threshold" RocketpoolNewClaimRoundStartedEventName EventName = "rocketpool_new_claimround" @@ -60,8 +85,6 @@ var MachineEvents = []EventName{ MonitoringMachineDiskAlmostFullEventName, MonitoringMachineCpuLoadEventName, MonitoringMachineMemoryUsageEventName, - MonitoringMachineSwitchedToETH2FallbackEventName, - MonitoringMachineSwitchedToETH1FallbackEventName, } var UserIndexEvents = []EventName{ @@ -72,39 +95,43 @@ var UserIndexEvents = []EventName{ MonitoringMachineDiskAlmostFullEventName, MonitoringMachineCpuLoadEventName, MonitoringMachineMemoryUsageEventName, - MonitoringMachineSwitchedToETH2FallbackEventName, - MonitoringMachineSwitchedToETH1FallbackEventName, +} + +var UserIndexEventsMap = map[EventName]struct{}{ + EthClientUpdateEventName: {}, + MonitoringMachineCpuLoadEventName: {}, + MonitoringMachineOfflineEventName: {}, + MonitoringMachineDiskAlmostFullEventName: {}, + MonitoringMachineMemoryUsageEventName: {}, } var EventLabel map[EventName]string = map[EventName]string{ - ValidatorBalanceDecreasedEventName: "Your validator(s) balance decreased", - ValidatorMissedProposalEventName: "Your validator(s) missed a proposal", - ValidatorExecutedProposalEventName: "Your validator(s) submitted a proposal", - ValidatorMissedAttestationEventName: "Your validator(s) missed an attestation", - 
ValidatorGotSlashedEventName: "Your validator(s) got slashed", - ValidatorDidSlashEventName: "Your validator(s) slashed another validator", - ValidatorIsOfflineEventName: "Your validator(s) state changed", - ValidatorReceivedDepositEventName: "Your validator(s) received a deposit", - ValidatorReceivedWithdrawalEventName: "A withdrawal was initiated for your validators", - NetworkSlashingEventName: "A slashing event has been registered by the network", - NetworkValidatorActivationQueueFullEventName: "The activation queue is full", - NetworkValidatorActivationQueueNotFullEventName: "The activation queue is empty", - NetworkValidatorExitQueueFullEventName: "The validator exit queue is full", - NetworkValidatorExitQueueNotFullEventName: "The validator exit queue is empty", - NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", - EthClientUpdateEventName: "An Ethereum client has a new update available", - MonitoringMachineOfflineEventName: "Your machine(s) might be offline", - MonitoringMachineDiskAlmostFullEventName: "Your machine(s) disk space is running low", - MonitoringMachineCpuLoadEventName: "Your machine(s) has a high CPU load", - MonitoringMachineMemoryUsageEventName: "Your machine(s) has a high memory load", - MonitoringMachineSwitchedToETH2FallbackEventName: "Your machine(s) is using its consensus client fallback", - MonitoringMachineSwitchedToETH1FallbackEventName: "Your machine(s) is using its execution client fallback", - TaxReportEventName: "You have an available tax report", - RocketpoolCommissionThresholdEventName: "Your configured Rocket Pool commission threshold is reached", - RocketpoolNewClaimRoundStartedEventName: "Your Rocket Pool claim from last round is available", - RocketpoolCollateralMinReached: "You reached the Rocket Pool min RPL collateral", - RocketpoolCollateralMaxReached: "You reached the Rocket Pool max RPL collateral", - SyncCommitteeSoon: "Your validator(s) will soon be part of the sync committee", + 
ValidatorBalanceDecreasedEventName: "Your validator(s) balance decreased", + ValidatorMissedProposalEventName: "Your validator(s) missed a proposal", + ValidatorExecutedProposalEventName: "Your validator(s) submitted a proposal", + ValidatorMissedAttestationEventName: "Your validator(s) missed an attestation", + ValidatorGotSlashedEventName: "Your validator(s) got slashed", + ValidatorDidSlashEventName: "Your validator(s) slashed another validator", + ValidatorIsOfflineEventName: "Your validator(s) state changed", + ValidatorReceivedDepositEventName: "Your validator(s) received a deposit", + ValidatorReceivedWithdrawalEventName: "A withdrawal was initiated for your validators", + NetworkSlashingEventName: "A slashing event has been registered by the network", + NetworkValidatorActivationQueueFullEventName: "The activation queue is full", + NetworkValidatorActivationQueueNotFullEventName: "The activation queue is empty", + NetworkValidatorExitQueueFullEventName: "The validator exit queue is full", + NetworkValidatorExitQueueNotFullEventName: "The validator exit queue is empty", + NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", + EthClientUpdateEventName: "An Ethereum client has a new update available", + MonitoringMachineOfflineEventName: "Your machine(s) might be offline", + MonitoringMachineDiskAlmostFullEventName: "Your machine(s) disk space is running low", + MonitoringMachineCpuLoadEventName: "Your machine(s) has a high CPU load", + MonitoringMachineMemoryUsageEventName: "Your machine(s) has a high memory load", + TaxReportEventName: "You have an available tax report", + RocketpoolCommissionThresholdEventName: "Your configured Rocket Pool commission threshold is reached", + RocketpoolNewClaimRoundStartedEventName: "Your Rocket Pool claim from last round is available", + RocketpoolCollateralMinReached: "You reached the Rocket Pool min RPL collateral", + RocketpoolCollateralMaxReached: "You reached the Rocket Pool max RPL 
collateral", + SyncCommitteeSoon: "Your validator(s) will soon be part of the sync committee", } func IsUserIndexed(event EventName) bool { @@ -145,8 +172,6 @@ var EventNames = []EventName{ MonitoringMachineOfflineEventName, MonitoringMachineDiskAlmostFullEventName, MonitoringMachineCpuLoadEventName, - MonitoringMachineSwitchedToETH2FallbackEventName, - MonitoringMachineSwitchedToETH1FallbackEventName, MonitoringMachineMemoryUsageEventName, TaxReportEventName, RocketpoolCommissionThresholdEventName, @@ -164,7 +189,7 @@ type EventNameDesc struct { } type MachineMetricSystemUser struct { - UserID uint64 + UserID UserId Machine string CurrentData *MachineMetricSystem CurrentDataInsertTs int64 @@ -247,25 +272,96 @@ type Notification interface { GetTitle() string GetEventFilter() string GetEmailAttachment() *EmailAttachment - GetUnsubscribeHash() string GetInfoMarkdown() string + GetUserId() UserId +} + +type NotificationBaseImpl struct { + LatestState string + SubscriptionID uint64 + EventName EventName + Epoch uint64 + Info string + Title string + EventFilter string + EmailAttachment *EmailAttachment + InfoMarkdown string + UserID UserId +} + +func (n NotificationBaseImpl) GetLatestState() string { + return n.LatestState +} + +func (n NotificationBaseImpl) GetSubscriptionID() uint64 { + return n.SubscriptionID +} + +func (n NotificationBaseImpl) GetEventName() EventName { + return n.EventName +} + +func (n NotificationBaseImpl) GetEpoch() uint64 { + return n.Epoch +} + +func (n NotificationBaseImpl) GetInfo(includeUrl bool) string { + return n.Info +} + +func (n NotificationBaseImpl) GetTitle() string { + return n.Title +} + +func (n NotificationBaseImpl) GetEventFilter() string { + return n.EventFilter +} + +func (n NotificationBaseImpl) GetEmailAttachment() *EmailAttachment { + return n.EmailAttachment +} + +func (n NotificationBaseImpl) GetInfoMarkdown() string { + return n.InfoMarkdown +} + +func (n NotificationBaseImpl) GetUserId() UserId { + return n.UserID } // 
func UnMarschal type Subscription struct { ID *uint64 `db:"id,omitempty"` - UserID *uint64 `db:"user_id,omitempty"` - EventName string `db:"event_name"` + UserID *UserId `db:"user_id,omitempty"` + EventName EventName `db:"event_name"` EventFilter string `db:"event_filter"` LastSent *time.Time `db:"last_sent_ts"` LastEpoch *uint64 `db:"last_sent_epoch"` // Channels pq.StringArray `db:"channels"` - CreatedTime time.Time `db:"created_ts"` - CreatedEpoch uint64 `db:"created_epoch"` - EventThreshold float64 `db:"event_threshold"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash" swaggertype:"string"` - State sql.NullString `db:"internal_state" swaggertype:"string"` + CreatedTime time.Time `db:"created_ts"` + CreatedEpoch uint64 `db:"created_epoch"` + EventThreshold float64 `db:"event_threshold"` + // State sql.NullString `db:"internal_state" swaggertype:"string"` + GroupId *int64 + DashboardId *int64 +} + +type UserId uint64 +type DashboardId uint64 +type DashboardGroupId uint64 +type ValidatorDashboardConfig struct { + DashboardsByUserId map[UserId]map[DashboardId]*ValidatorDashboard +} + +type ValidatorDashboard struct { + Name string `db:"name"` + Groups map[DashboardGroupId]*ValidatorDashboardGroup +} + +type ValidatorDashboardGroup struct { + Name string `db:"name"` + Validators []types.ValidatorIndex } type TaggedValidators struct { @@ -455,7 +551,6 @@ type Email struct { Title string Body template.HTML SubscriptionManageURL template.HTML - UnsubURL template.HTML } type UserWebhook struct { diff --git a/backend/pkg/commons/utils/config.go b/backend/pkg/commons/utils/config.go index ad8529fd0..64a783b78 100644 --- a/backend/pkg/commons/utils/config.go +++ b/backend/pkg/commons/utils/config.go @@ -359,15 +359,19 @@ func setCLConfig(cfg *types.Config) error { maxForkEpoch := uint64(18446744073709551615) if jr.Data.AltairForkEpoch == nil { + log.Warnf("AltairForkEpoch not set, defaulting to maxForkEpoch") jr.Data.AltairForkEpoch = &maxForkEpoch } if 
jr.Data.BellatrixForkEpoch == nil { + log.Warnf("BellatrixForkEpoch not set, defaulting to maxForkEpoch") jr.Data.BellatrixForkEpoch = &maxForkEpoch } if jr.Data.CapellaForkEpoch == nil { + log.Warnf("CapellaForkEpoch not set, defaulting to maxForkEpoch") jr.Data.CapellaForkEpoch = &maxForkEpoch } if jr.Data.DenebForkEpoch == nil { + log.Warnf("DenebForkEpoch not set, defaulting to maxForkEpoch") jr.Data.DenebForkEpoch = &maxForkEpoch } diff --git a/backend/pkg/consapi/types/blobs.go b/backend/pkg/consapi/types/blobs.go index 236cefd8f..c8a94c60b 100644 --- a/backend/pkg/consapi/types/blobs.go +++ b/backend/pkg/consapi/types/blobs.go @@ -4,13 +4,20 @@ import "github.com/ethereum/go-ethereum/common/hexutil" type StandardBlobSidecarsResponse struct { Data []struct { - BlockRoot hexutil.Bytes `json:"block_root"` - Index uint64 `json:"index,string"` - Slot uint64 `json:"slot,string"` - BlockParentRoot hexutil.Bytes `json:"block_parent_root"` - ProposerIndex uint64 `json:"proposer_index,string"` - KzgCommitment hexutil.Bytes `json:"kzg_commitment"` - KzgProof hexutil.Bytes `json:"kzg_proof"` - Blob hexutil.Bytes `json:"blob"` + Index uint64 `json:"index,string"` + Blob hexutil.Bytes `json:"blob"` + KzgCommitment hexutil.Bytes `json:"kzg_commitment"` + KzgProof hexutil.Bytes `json:"kzg_proof"` + SignedBlockHeader struct { + Message struct { + Slot uint64 `json:"slot,string"` + ProposerIndex uint64 `json:"proposer_index,string"` + ParentRoot hexutil.Bytes `json:"parent_root"` + StateRoot hexutil.Bytes `json:"state_root"` + BodyRoot hexutil.Bytes `json:"body_root"` + } `json:"message"` + Signature hexutil.Bytes `json:"signature"` + } `json:"signed_block_header"` + KzgCommitmentInclusionProof []hexutil.Bytes `json:"kzg_commitment_inclusion_proof"` } } diff --git a/backend/pkg/consapi/types/spec.go b/backend/pkg/consapi/types/spec.go index 4241b15a4..181fd7b65 100644 --- a/backend/pkg/consapi/types/spec.go +++ b/backend/pkg/consapi/types/spec.go @@ -112,4 +112,9 @@ type 
StandardSpec struct { DomainSyncCommittee string `json:"DOMAIN_SYNC_COMMITTEE"` BlsWithdrawalPrefix string `json:"BLS_WITHDRAWAL_PREFIX"` ZeroHash [32]byte // ZeroHash is used to represent a zeroed out 32 byte array. + // DENEB + MaxRequestBlocksDeneb *uint64 `json:"MAX_REQUEST_BLOCKS_DENEB,string"` + MaxRequestBlobSidecars *uint64 `json:"MAX_REQUEST_BLOB_SIDECARS,string"` + MinEpochsForBlobSidecarsRequests *uint64 `json:"MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,string"` + BlobSidecarSubnetCount *uint64 `json:"BLOB_SIDECAR_SUBNET_COUNT,string"` } diff --git a/backend/pkg/exporter/db/db.go b/backend/pkg/exporter/db/db.go index b58c72c6a..0d092c1d1 100644 --- a/backend/pkg/exporter/db/db.go +++ b/backend/pkg/exporter/db/db.go @@ -523,6 +523,14 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie return fmt.Errorf("error preparing insert validator statement: %w", err) } + validatorStatusUpdateStmt, err := tx.Prepare(`UPDATE validators SET status = $1 WHERE validatorindex = $2;`) + if err != nil { + return fmt.Errorf("error preparing update validator status statement: %w", err) + } + + log.Info("updating validator status and metadata") + valiudatorUpdateTs := time.Now() + updates := 0 for _, v := range validators { // exchange farFutureEpoch with the corresponding max sql value @@ -610,10 +618,14 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie } if c.Status != v.Status { - log.Infof("Status changed for validator %v from %v to %v", v.Index, c.Status, v.Status) - log.Infof("v.ActivationEpoch %v, latestEpoch %v, lastAttestationSlots[v.Index] %v, thresholdSlot %v, lastGlobalAttestedEpoch: %v, lastValidatorAttestedEpoch: %v", v.ActivationEpoch, latestEpoch, lastAttestationSlot, thresholdSlot, lastGlobalAttestedEpoch, lastValidatorAttestedEpoch) - queries.WriteString(fmt.Sprintf("UPDATE validators SET status = '%s' WHERE validatorindex = %d;\n", v.Status, c.Index)) - updates++ + log.Debugf("Status changed for 
validator %v from %v to %v", v.Index, c.Status, v.Status) + log.Debugf("v.ActivationEpoch %v, latestEpoch %v, lastAttestationSlots[v.Index] %v, thresholdSlot %v, lastGlobalAttestedEpoch: %v, lastValidatorAttestedEpoch: %v", v.ActivationEpoch, latestEpoch, lastAttestationSlot, thresholdSlot, lastGlobalAttestedEpoch, lastValidatorAttestedEpoch) + //queries.WriteString(fmt.Sprintf("UPDATE validators SET status = '%s' WHERE validatorindex = %d;\n", v.Status, c.Index)) + _, err := validatorStatusUpdateStmt.Exec(v.Status, c.Index) + if err != nil { + return fmt.Errorf("error updating validator status: %w", err) + } + //updates++ } // if c.Balance != v.Balance { // // log.LogInfo("Balance changed for validator %v from %v to %v", v.Index, c.Balance, v.Balance) @@ -658,6 +670,11 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie } } + err = validatorStatusUpdateStmt.Close() + if err != nil { + return fmt.Errorf("error closing validator status update statement: %w", err) + } + err = insertStmt.Close() if err != nil { return fmt.Errorf("error closing insert validator statement: %w", err) @@ -673,6 +690,7 @@ func SaveValidators(epoch uint64, validators []*types.Validator, client rpc.Clie } log.Infof("validator table update completed, took %v", time.Since(updateStart)) } + log.Infof("updating validator status and metadata completed, took %v", time.Since(valiudatorUpdateTs)) s := time.Now() newValidators := []struct { diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index eed83b0f1..c2e2b3cec 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -1,55 +1,98 @@ package notification import ( - "encoding/hex" + "strings" + "github.com/doug-martin/goqu/v9" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) -func 
GetSubsForEventFilter(eventName types.EventName) ([][]byte, map[string][]types.Subscription, error) { +// Retrieves all subscription for a given event filter +// Map key corresponds to the event filter which can be +// a validator pubkey or an eth1 address (for RPL notifications) +// or a list of validators for the tax report notifications +// or a machine name for machine notifications or a eth client name for ethereum client update notifications +// optionally it is possible to set a filter on the last sent ts and the event filter +// fields +func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, lastSentFilterArgs []interface{}, eventFilters []string) (map[string][]types.Subscription, error) { var subs []types.Subscription - subQuery := ` - SELECT id, user_id, event_filter, last_sent_epoch, created_epoch, event_threshold, ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, internal_state from users_subscriptions where event_name = $1 - ` + + // subQuery := ` + // SELECT + // id, + // user_id, + // event_filter, + // last_sent_epoch, + // created_epoch, + // event_threshold, + // ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, + // internal_state + // from users_subscriptions + // where event_name = $1 + // ` + + eventNameForQuery := utils.GetNetwork() + ":" + string(eventName) + + if _, ok := types.UserIndexEventsMap[eventName]; ok { + eventNameForQuery = string(eventName) + } + ds := goqu.Dialect("postgres").From("users_subscriptions").Select( + goqu.C("id"), + goqu.C("user_id"), + goqu.C("event_filter"), + goqu.C("last_sent_epoch"), + goqu.C("created_epoch"), + goqu.C("event_threshold"), + goqu.C("event_name"), + ).Where(goqu.L("(event_name = ? 
AND user_id <> 0)", eventNameForQuery)) + + if lastSentFilter != "" { + if len(lastSentFilterArgs) > 0 { + ds = ds.Where(goqu.L(lastSentFilter, lastSentFilterArgs...)) + } else { + ds = ds.Where(goqu.L(lastSentFilter)) + } + } + if len(eventFilters) > 0 { + ds = ds.Where(goqu.L("event_filter = ANY(?)", pq.StringArray(eventFilters))) + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, err + } subMap := make(map[string][]types.Subscription, 0) - err := db.FrontendWriterDB.Select(&subs, subQuery, utils.GetNetwork()+":"+string(eventName)) + err = db.FrontendWriterDB.Select(&subs, query, args...) if err != nil { - return nil, nil, err + return nil, err } - filtersEncode := make([][]byte, 0, len(subs)) + log.Infof("Found %d subscriptions for event %s", len(subs), eventName) + for _, sub := range subs { + sub.EventName = types.EventName(strings.Replace(string(sub.EventName), utils.GetNetwork()+":", "", 1)) // remove the network name from the event name if _, ok := subMap[sub.EventFilter]; !ok { subMap[sub.EventFilter] = make([]types.Subscription, 0) } - subMap[sub.EventFilter] = append(subMap[sub.EventFilter], types.Subscription{ - UserID: sub.UserID, - ID: sub.ID, - LastEpoch: sub.LastEpoch, - EventFilter: sub.EventFilter, - CreatedEpoch: sub.CreatedEpoch, - EventThreshold: sub.EventThreshold, - State: sub.State, - }) - - b, _ := hex.DecodeString(sub.EventFilter) - filtersEncode = append(filtersEncode, b) + subMap[sub.EventFilter] = append(subMap[sub.EventFilter], sub) } - return filtersEncode, subMap, nil + + return subMap, nil } -func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { - pushByID := map[uint64][]string{} +func GetUserPushTokenByIds(ids []types.UserId) (map[types.UserId][]string, error) { + pushByID := map[types.UserId][]string{} if len(ids) == 0 { return pushByID, nil } var rows []struct { - ID uint64 `db:"user_id"` - Token string `db:"notification_token"` + ID types.UserId `db:"user_id"` + Token string 
`db:"notification_token"` } err := db.FrontendWriterDB.Select(&rows, "SELECT DISTINCT ON (user_id, notification_token) user_id, notification_token FROM users_devices WHERE (user_id = ANY($1) AND user_id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)) AND notify_enabled = true AND active = true AND notification_token IS NOT NULL AND LENGTH(notification_token) > 20 ORDER BY user_id, notification_token, id DESC", pq.Array(ids), types.PushNotificationChannel) @@ -69,14 +112,14 @@ func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { } // GetUserEmailsByIds returns the emails of users. -func GetUserEmailsByIds(ids []uint64) (map[uint64]string, error) { - mailsByID := map[uint64]string{} +func GetUserEmailsByIds(ids []types.UserId) (map[types.UserId]string, error) { + mailsByID := map[types.UserId]string{} if len(ids) == 0 { return mailsByID, nil } var rows []struct { - ID uint64 `db:"id"` - Email string `db:"email"` + ID types.UserId `db:"id"` + Email string `db:"email"` } // err := db.FrontendWriterDB.Select(&rows, "SELECT id, email FROM users WHERE id = ANY($1) AND id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)", pq.Array(ids), types.EmailNotificationChannel) diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index 91291da54..b4a75a3d6 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -2,12 +2,13 @@ package notification import ( "context" + "fmt" "strings" "time" - firebase "firebase.google.com/go" - "firebase.google.com/go/messaging" + firebase "firebase.google.com/go/v4" + "firebase.google.com/go/v4/messaging" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/utils" "google.golang.org/api/option" @@ -17,17 +18,19 @@ func isRelevantError(response *messaging.SendResponse) bool { if !response.Success && response.Error != nil { 
// Ignore https://stackoverflow.com/questions/58308835/using-firebase-for-notifications-getting-app-instance-has-been-unregistered // Errors since they indicate that the user token is expired - if !strings.Contains(response.Error.Error(), "registration-token-not-registered") { + if !strings.Contains(response.Error.Error(), "registration-token-not-registered") && + !strings.Contains(response.Error.Error(), "Requested entity was not found.") && + !strings.Contains(response.Error.Error(), "Request contains an invalid argument.") { return true } } return false } -func SendPushBatch(messages []*messaging.Message) error { +func SendPushBatch(messages []*messaging.Message, dryRun bool) error { credentialsPath := utils.Config.Notifications.FirebaseCredentialsPath if credentialsPath == "" { - log.Error(nil, "firebase credentials path not provided, disabling push notifications", 0) + log.Error(fmt.Errorf("firebase credentials path not provided, disabling push notifications"), "error initializing SendPushBatch", 0) return nil } @@ -42,29 +45,32 @@ func SendPushBatch(messages []*messaging.Message) error { app, err := firebase.NewApp(context.Background(), nil, opt) if err != nil { - log.Error(nil, "error initializing app", 0) + log.Error(err, "error initializing app", 0) return err } client, err := app.Messaging(ctx) if err != nil { - log.Error(nil, "error initializing messaging", 0) + log.Error(err, "error initializing messaging", 0) return err } - var waitBeforeTryInSeconds = []time.Duration{0 * time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second} + var waitBeforeTryInSeconds = []int{0, 2, 4, 8, 16} var resultSuccessCount, resultFailureCount int = 0, 0 var result *messaging.BatchResponse currentMessages := messages tries := 0 for _, s := range waitBeforeTryInSeconds { - time.Sleep(s) + time.Sleep(time.Duration(s) * time.Second) tries++ - - result, err = client.SendAll(context.Background(), currentMessages) + if dryRun { + result, err = 
client.SendEachDryRun(context.Background(), currentMessages) + } else { + result, err = client.SendEach(context.Background(), currentMessages) + } if err != nil { - log.Error(nil, "error sending push notifications", 0) + log.Error(err, "error sending push notifications", 0) return err } @@ -74,7 +80,9 @@ func SendPushBatch(messages []*messaging.Message) error { newMessages := make([]*messaging.Message, 0, result.FailureCount) if result.FailureCount > 0 { for i, response := range result.Responses { + //log.Info(response) if isRelevantError(response) { + log.Infof("retrying message %d", i) newMessages = append(newMessages, currentMessages[i]) resultFailureCount-- } @@ -90,7 +98,7 @@ func SendPushBatch(messages []*messaging.Message) error { if len(currentMessages) > 0 { for _, response := range result.Responses { if isRelevantError(response) { - log.Error(nil, "firebase error", 0, log.Fields{"MessageID": response.MessageID, "response": response.Error}) + log.Error(fmt.Errorf("firebase error, message id: %s, error: %s", response.MessageID, response.Error), "error sending push notifications", 0) resultFailureCount++ } } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 1ba2266bf..60b6a8769 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -3,15 +3,15 @@ package notification import ( "bytes" "context" - "crypto/sha256" "database/sql" "database/sql/driver" "encoding/hex" "encoding/json" "errors" + "maps" + "slices" "fmt" - "html" "html/template" "io" "math/big" @@ -22,7 +22,7 @@ import ( "time" gcp_bigtable "cloud.google.com/go/bigtable" - "firebase.google.com/go/messaging" + "firebase.google.com/go/v4/messaging" "github.com/ethereum/go-ethereum/common" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" @@ -34,7 +34,6 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/types" 
"github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/jmoiron/sqlx" "github.com/lib/pq" "github.com/rocket-pool/rocketpool-go/utils/eth" "golang.org/x/text/cases" @@ -46,6 +45,22 @@ func InitNotificationSender() { go notificationSender() } +func GetNotificationsForEpoch(pubkeyCachePath string, epoch uint64) (types.NotificationsPerUserId, error) { + err := initPubkeyCache(pubkeyCachePath) + if err != nil { + log.Fatal(err, "error initializing pubkey cache path for notifications", 0) + } + return collectNotifications(epoch) +} + +func GetUserNotificationsForEpoch(pubkeyCachePath string, epoch uint64) (types.NotificationsPerUserId, error) { + err := initPubkeyCache(pubkeyCachePath) + if err != nil { + log.Fatal(err, "error initializing pubkey cache path for notifications", 0) + } + return collectUserDbNotifications(epoch) +} + func InitNotificationCollector(pubkeyCachePath string) { err := initPubkeyCache(pubkeyCachePath) if err != nil { @@ -126,7 +141,7 @@ func notificationCollector() { break } - queueNotifications(notifications, db.FrontendWriterDB) // this caused the collected notifications to be queued and sent + queueNotifications(notifications) // this caused the collected notifications to be queued and sent // Network DB Notifications (user related, must only run on one instance ever!!!!) 
if utils.Config.Notifications.UserDBNotifications { @@ -139,7 +154,7 @@ func notificationCollector() { continue } - queueNotifications(userNotifications, db.FrontendWriterDB) + queueNotifications(userNotifications) } log.InfoWithFields(log.Fields{"notifications": len(notifications), "duration": time.Since(start), "epoch": epoch}, "notifications completed") @@ -168,7 +183,7 @@ func notificationSender() { if err != nil { log.Error(err, "error getting advisory lock from db", 0) - conn.Close() + err := conn.Close() if err != nil { log.Error(err, "error returning connection to connection pool", 0) } @@ -177,12 +192,12 @@ func notificationSender() { } log.Infof("lock obtained") - err = dispatchNotifications(db.FrontendWriterDB) + err = dispatchNotifications() if err != nil { log.Error(err, "error dispatching notifications", 0) } - err = garbageCollectNotificationQueue(db.FrontendWriterDB) + err = garbageCollectNotificationQueue() if err != nil { log.Error(err, "error garbage collecting notification queue", 0) } @@ -225,12 +240,13 @@ func notificationSender() { } } -func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[uint64]map[types.EventName][]types.Notification{} +func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} start := time.Now() var err error var dbIsCoherent bool + // do a consistency check to make sure that we have all the data we need in the db err = db.WriterDb.Get(&dbIsCoherent, ` SELECT NOT (array[false] && array_agg(is_coherent)) AS is_coherent @@ -253,6 +269,64 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. 
log.Infof("started collecting notifications") + // type dashboardDefinitionRow struct { + // DashboardId types.DashboardId `db:"dashboard_id"` + // DashboardName string `db:"dashboard_name"` + // UserId types.UserId `db:"user_id"` + // GroupId types.DashboardGroupId `db:"group_id"` + // GroupName string `db:"group_name"` + // ValidatorIndex types.ValidatorIndex `db:"validator_index"` + // } + + // log.Infof("retrieving dashboard definitions") + // // Retrieve all dashboard definitions to be able to retrieve validators included in + // // the group notification subscriptions + // // TODO: add a filter to retrieve only groups that have notifications enabled + // // Needs a new field in the db + // var dashboardDefinitions []dashboardDefinitionRow + // err = db.AlloyWriter.Select(&dashboardDefinitions, ` + // select + // users_val_dashboards.id as dashboard_id, + // users_val_dashboards.name as dashboard_name, + // users_val_dashboards.user_id, + // users_val_dashboards_groups.id as group_id, + // users_val_dashboards_groups.name as group_name, + // users_val_dashboards_validators.validator_index + // from users_val_dashboards + // left join users_val_dashboards_groups on users_val_dashboards_groups.dashboard_id = users_val_dashboards.id + // left join users_val_dashboards_validators on users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id; + // `) + // if err != nil { + // return nil, fmt.Errorf("error getting dashboard definitions: %v", err) + // } + + // // Now initialize the validator dashboard configuration map + // validatorDashboardConfig := &types.ValidatorDashboardConfig{ + // DashboardsByUserId: make(map[types.UserId]map[types.DashboardId]*types.ValidatorDashboard), + // } + // for _, row := range dashboardDefinitions { + // if validatorDashboardConfig.DashboardsByUserId[row.UserId] == nil { + // 
validatorDashboardConfig.DashboardsByUserId[row.UserId] = make(map[types.DashboardId]*types.ValidatorDashboard) + // } + // if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] == nil { + // validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] = &types.ValidatorDashboard{ + // Name: row.DashboardName, + // Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), + // } + // } + // if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] == nil { + // validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ + // Name: row.GroupName, + // Validators: []uint64{}, + // } + // } + // validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) + // } + + // TODO: pass the validatorDashboardConfig to the notification collection functions + // The following functions will collect the notifications and add them to the + // notificationsByUserID map. The notifications will be queued and sent later + // by the notification sender process err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() @@ -295,7 +369,7 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. 
} log.Infof("collecting withdrawal notifications took: %v", time.Since(start)) - err = collectNetworkNotifications(notificationsByUserID, types.NetworkLivenessIncreasedEventName) + err = collectNetworkNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_network").Inc() return nil, fmt.Errorf("error collecting network notifications: %v", err) @@ -315,7 +389,7 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. return nil, fmt.Errorf("error collecting rocketpool notifications: %v", err) } } else { - err = collectRocketpoolComissionNotifications(notificationsByUserID, types.RocketpoolCommissionThresholdEventName) + err = collectRocketpoolComissionNotifications(notificationsByUserID) if err != nil { //nolint:misspell metrics.Errors.WithLabelValues("notifications_collect_rocketpool_comission").Inc() @@ -323,7 +397,7 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. } log.Infof("collecting rocketpool commissions took: %v", time.Since(start)) - err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID, types.RocketpoolNewClaimRoundStartedEventName) + err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_rocketpool_reward_claim").Inc() return nil, fmt.Errorf("error collecting new rocketpool claim round: %v", err) @@ -346,7 +420,7 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. } } - err = collectSyncCommittee(notificationsByUserID, types.SyncCommitteeSoon, epoch) + err = collectSyncCommittee(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_sync_committee").Inc() return nil, fmt.Errorf("error collecting sync committee: %v", err) @@ -356,8 +430,8 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. 
return notificationsByUserID, nil } -func collectUserDbNotifications(epoch uint64) (map[uint64]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[uint64]map[types.EventName][]types.Notification{} +func collectUserDbNotifications(epoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} var err error // Monitoring (premium): machine offline @@ -389,14 +463,14 @@ func collectUserDbNotifications(epoch uint64) (map[uint64]map[types.EventName][] } // New ETH clients - err = collectEthClientNotifications(notificationsByUserID, types.EthClientUpdateEventName) + err = collectEthClientNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_eth_client").Inc() return nil, fmt.Errorf("error collecting Eth client notifications: %v", err) } //Tax Report - err = collectTaxReportNotificationNotifications(notificationsByUserID, types.TaxReportEventName) + err = collectTaxReportNotificationNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_tax_report").Inc() return nil, fmt.Errorf("error collecting tax report notifications: %v", err) @@ -405,40 +479,20 @@ func collectUserDbNotifications(epoch uint64) (map[uint64]map[types.EventName][] return notificationsByUserID, nil } -func queueNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) { +func queueNotifications(notificationsByUserID types.NotificationsPerUserId) { subByEpoch := map[uint64][]uint64{} - // prevent multiple events being sent with the same subscription id - for user, notifications := range notificationsByUserID { - for eventType, events := range notifications { - filteredEvents := make([]types.Notification, 0) - - for _, ev := range events { - isDuplicate := false - for _, fe := range filteredEvents { - if fe.GetSubscriptionID() == ev.GetSubscriptionID() { - isDuplicate = true - } - 
} - if !isDuplicate { - filteredEvents = append(filteredEvents, ev) - } - } - notificationsByUserID[user][eventType] = filteredEvents - } - } - - err := queueEmailNotifications(notificationsByUserID, useDB) + err := queueEmailNotifications(notificationsByUserID) if err != nil { log.Error(err, "error queuing email notifications", 0) } - err = queuePushNotification(notificationsByUserID, useDB) + err = queuePushNotification(notificationsByUserID) if err != nil { log.Error(err, "error queuing push notifications", 0) } - err = queueWebhookNotifications(notificationsByUserID, useDB) + err = queueWebhookNotifications(notificationsByUserID) if err != nil { log.Error(err, "error queuing webhook notifications", 0) } @@ -455,9 +509,11 @@ func queueNotifications(notificationsByUserID map[uint64]map[types.EventName][]t } } } + + // obsolete as notifications are anyway sent on a per-epoch basis for epoch, subIDs := range subByEpoch { // update that we've queued the subscription (last sent rather means last queued) - err := db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch, useDB) + err := db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch) if err != nil { log.Error(err, "error updating sent-time of sent notifications", 0) metrics.Errors.WithLabelValues("notifications_updating_sent_time").Inc() @@ -483,6 +539,7 @@ func queueNotifications(notificationsByUserID map[uint64]map[types.EventName][]t } } + // no need to batch here as the internal state will become obsolete for state, subs := range stateToSub { subArray := make([]int64, 0) for subID := range subs { @@ -495,23 +552,23 @@ func queueNotifications(notificationsByUserID map[uint64]map[types.EventName][]t } } -func dispatchNotifications(useDB *sqlx.DB) error { - err := sendEmailNotifications(useDB) +func dispatchNotifications() error { + err := sendEmailNotifications() if err != nil { return fmt.Errorf("error sending email notifications, err: %w", err) } - err = sendPushNotifications(useDB) + err = 
sendPushNotifications() if err != nil { return fmt.Errorf("error sending push notifications, err: %w", err) } - err = sendWebhookNotifications(useDB) + err = sendWebhookNotifications() if err != nil { return fmt.Errorf("error sending webhook notifications, err: %w", err) } - err = sendDiscordNotifications(useDB) + err = sendDiscordNotifications() if err != nil { return fmt.Errorf("error sending webhook discord notifications, err: %w", err) } @@ -520,8 +577,8 @@ func dispatchNotifications(useDB *sqlx.DB) error { } // garbageCollectNotificationQueue deletes entries from the notification queue that have been processed -func garbageCollectNotificationQueue(useDB *sqlx.DB) error { - rows, err := useDB.Exec(`DELETE FROM notification_queue WHERE (sent < now() - INTERVAL '30 minutes') OR (created < now() - INTERVAL '1 hour')`) +func garbageCollectNotificationQueue() error { + rows, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE (sent < now() - INTERVAL '30 minutes') OR (created < now() - INTERVAL '1 hour')`) if err != nil { return fmt.Errorf("error deleting from notification_queue %w", err) } @@ -541,11 +598,8 @@ func getNetwork() string { return "" } -func queuePushNotification(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - userIDs := []uint64{} - for userID := range notificationsByUserID { - userIDs = append(userIDs, userID) - } +func queuePushNotification(notificationsByUserID types.NotificationsPerUserId) error { + userIDs := slices.Collect(maps.Keys(notificationsByUserID)) tokensByUserID, err := GetUserPushTokenByIds(userIDs) if err != nil { @@ -559,7 +613,10 @@ func queuePushNotification(notificationsByUserID map[uint64]map[types.EventName] continue } - go func(userTokens []string, userNotifications map[types.EventName][]types.Notification) { + // todo: this looks like a flawed approach to queue the notifications + // this will issue one db write per user, which is not optimal + // we should batch 
the notifications and write them in one go + go func(userTokens []string, userNotifications map[types.EventName]map[types.EventFilter]types.Notification) { var batch []*messaging.Message for event, ns := range userNotifications { for _, n := range ns { @@ -594,7 +651,7 @@ func queuePushNotification(notificationsByUserID map[uint64]map[types.EventName] Messages: batch, } - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'push', $2)`, time.Now(), transitPushContent) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'push', $2)`, time.Now(), transitPushContent) if err != nil { log.Error(err, "error writing transit push notification to db", 0) return @@ -604,10 +661,10 @@ func queuePushNotification(notificationsByUserID map[uint64]map[types.EventName] return nil } -func sendPushNotifications(useDB *sqlx.DB) error { +func sendPushNotifications() error { var notificationQueueItem []types.TransitPush - err := useDB.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -629,7 +686,7 @@ func sendPushNotifications(useDB *sqlx.DB) error { end = len(n.Content.Messages) } - err = SendPushBatch(n.Content.Messages[start:end]) + err = SendPushBatch(n.Content.Messages[start:end], false) if err != nil { metrics.Errors.WithLabelValues("notifications_send_push_batch").Inc() log.Error(err, "error sending firebase batch job", 0) @@ -637,7 +694,7 @@ func sendPushNotifications(useDB *sqlx.DB) error { metrics.NotificationsSent.WithLabelValues("push", "200").Add(float64(len(n.Content.Messages))) } - _, err = useDB.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) if err != nil { return fmt.Errorf("error updating sent status for push notification with id: %v, err: %w", n.Id, err) } @@ -646,11 +703,9 @@ func 
sendPushNotifications(useDB *sqlx.DB) error { return nil } -func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - userIDs := []uint64{} - for userID := range notificationsByUserID { - userIDs = append(userIDs, userID) - } +func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId) error { + userIDs := slices.Collect(maps.Keys(notificationsByUserID)) + emailsByUserID, err := GetUserEmailsByIds(userIDs) if err != nil { metrics.Errors.WithLabelValues("notifications_get_user_mail_by_id").Inc() @@ -665,7 +720,7 @@ func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventNam // metrics.Errors.WithLabelValues("notifications_mail_not_found").Inc() continue } - go func(userEmail string, userNotifications map[types.EventName][]types.Notification) { + go func(userEmail string, userNotifications map[types.EventName]map[types.EventFilter]types.Notification) { attachments := []types.EmailAttachment{} var msg types.Email @@ -688,8 +743,8 @@ func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventNam } //nolint:gosec // this is a static string msg.Body += template.HTML(fmt.Sprintf("%s
====

", types.EventLabel[event_title])) - unsubURL := "https://" + utils.Config.Frontend.SiteDomain + "/notifications/unsubscribe" - for i, n := range ns { + i := 0 + for _, n := range ns { // Find all unique notification titles for the subject title := n.GetTitle() if _, ok := notificationTitlesMap[title]; !ok { @@ -697,68 +752,6 @@ func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventNam notificationTitles = append(notificationTitles, title) } - unsubHash := n.GetUnsubscribeHash() - if unsubHash == "" { - id := n.GetSubscriptionID() - - tx, err := db.FrontendWriterDB.Beginx() - if err != nil { - log.Error(err, "error starting transaction", 0) - } - var sub types.Subscription - err = tx.Get(&sub, ` - SELECT - id, - user_id, - event_name, - event_filter, - last_sent_ts, - last_sent_epoch, - created_ts, - created_epoch, - event_threshold - FROM users_subscriptions - WHERE id = $1 - `, id) - if err != nil { - log.Error(err, "error getting user subscription by subscription id", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } - } - - raw := fmt.Sprintf("%v%v%v%v", sub.ID, sub.UserID, sub.EventName, sub.CreatedTime) - digest := sha256.Sum256([]byte(raw)) - - _, err = tx.Exec("UPDATE users_subscriptions set unsubscribe_hash = $1 WHERE id = $2", digest[:], id) - if err != nil { - log.Error(err, "error updating users subscriptions table with unsubscribe hash", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } - } - - err = tx.Commit() - if err != nil { - log.Error(err, "error committing transaction to update users subscriptions with an unsubscribe hash", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } - } - - unsubHash = hex.EncodeToString(digest[:]) - } - if i == 0 { - unsubURL += "?hash=" + html.EscapeString(unsubHash) - } else { - unsubURL += "&hash=" + html.EscapeString(unsubHash) - } - 
//nolint:gosec // this is a static string - msg.UnsubURL = template.HTML(fmt.Sprintf(`Unsubscribe`, unsubURL)) - if event != types.SyncCommitteeSoon { // SyncCommitteeSoon notifications are summed up in getEventInfo for all validators //nolint:gosec // this is a static string @@ -770,6 +763,7 @@ func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventNam } metrics.NotificationsQueued.WithLabelValues("email", string(event)).Inc() + i++ } eventInfo := getEventInfo(event, ns) @@ -798,7 +792,7 @@ func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventNam Attachments: attachments, } - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'email', $2)`, time.Now(), transitEmailContent) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'email', $2)`, time.Now(), transitEmailContent) if err != nil { log.Error(err, "error writing transit email to db", 0) } @@ -807,10 +801,10 @@ func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventNam return nil } -func sendEmailNotifications(useDb *sqlx.DB) error { +func sendEmailNotifications() error { var notificationQueueItem []types.TransitEmail - err := useDb.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -833,7 +827,7 @@ func sendEmailNotifications(useDb *sqlx.DB) error { metrics.NotificationsSent.WithLabelValues("email", "200").Inc() } } - _, err = useDb.Exec(`UPDATE notification_queue set sent = now() where id = $1`, n.Id) + _, err = db.WriterDb.Exec(`UPDATE notification_queue set sent = now() where id = $1`, n.Id) if err != nil { return fmt.Errorf("error updating sent status for email notification with id: %v, err: %w", n.Id, err) } @@ -841,16 +835,17 @@ func sendEmailNotifications(useDb *sqlx.DB) error { return nil } -func queueWebhookNotifications(notificationsByUserID 
map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { +func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserId) error { for userID, userNotifications := range notificationsByUserID { var webhooks []types.UserWebhook - err := useDB.Select(&webhooks, ` + err := db.FrontendWriterDB.Select(&webhooks, ` SELECT id, user_id, url, retries, event_names, + last_sent, destination FROM users_webhooks @@ -882,7 +877,7 @@ func queueWebhookNotifications(notificationsByUserID map[uint64]map[types.EventN if len(notifications) > 0 { // reset Retries if w.Retries > 5 && w.LastSent.Valid && w.LastSent.Time.Add(time.Hour).Before(time.Now()) { - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = 0 WHERE id = $1;`, w.ID) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = 0 WHERE id = $1;`, w.ID) if err != nil { log.Error(err, "error updating users_webhooks table; setting retries to zero", 0) continue @@ -959,7 +954,7 @@ func queueWebhookNotifications(notificationsByUserID map[uint64]map[types.EventN } // process notifs for _, n := range notifs { - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), $1, $2);`, n.Channel, n.Content) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), $1, $2);`, n.Channel, n.Content) if err != nil { log.Error(err, "error inserting into webhooks_queue", 0) } else { @@ -969,7 +964,7 @@ func queueWebhookNotifications(notificationsByUserID map[uint64]map[types.EventN // process discord notifs for _, dNotifs := range discordNotifMap { for _, n := range dNotifs { - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), 'webhook_discord', $1);`, n) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), 'webhook_discord', $1);`, n) if err != nil { log.Error(err, "error inserting into webhooks_queue (discord)", 
0) continue @@ -982,10 +977,10 @@ func queueWebhookNotifications(notificationsByUserID map[uint64]map[types.EventN return nil } -func sendWebhookNotifications(useDB *sqlx.DB) error { +func sendWebhookNotifications() error { var notificationQueueItem []types.TransitWebhook - err := useDB.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -1002,7 +997,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { for _, n := range notificationQueueItem { // do not retry after 5 attempts if n.Content.Webhook.Retries > 5 { - _, err := db.FrontendWriterDB.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) + _, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) if err != nil { return fmt.Errorf("error deleting from notification queue: %w", err) } @@ -1018,7 +1013,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { _, err = url.Parse(n.Content.Webhook.Url) if err != nil { - _, err := db.FrontendWriterDB.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) + _, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) if err != nil { return fmt.Errorf("error deleting from notification queue: %w", err) } @@ -1038,14 +1033,14 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { } defer resp.Body.Close() - _, err = useDB.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) if err != nil { log.Error(err, "error updating notification_queue table", 0) return } if resp != nil && resp.StatusCode < 400 { - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = 0, last_sent = now() WHERE id = $1;`, n.Content.Webhook.ID) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = 0, last_sent = now() WHERE id = $1;`, n.Content.Webhook.ID) if err != nil { log.Error(err, "error updating users_webhooks table", 0) return @@ -1063,7 
+1058,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { errResp.Body = string(b) } - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp) if err != nil { log.Error(err, "error updating users_webhooks table", 0) return @@ -1074,10 +1069,10 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { return nil } -func sendDiscordNotifications(useDB *sqlx.DB) error { +func sendDiscordNotifications() error { var notificationQueueItem []types.TransitDiscord - err := useDB.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -1098,7 +1093,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { for _, n := range notificationQueueItem { // purge the event from existence if the retry counter is over 5 if n.Content.Webhook.Retries > 5 { - _, err = db.FrontendWriterDB.Exec(`DELETE FROM notification_queue where id = $1`, n.Id) + _, err = db.WriterDb.Exec(`DELETE FROM notification_queue where id = $1`, n.Id) if err != nil { log.Warnf("failed to delete notification from queue: %v", err) } @@ -1113,10 +1108,12 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { notifMap[n.Content.Webhook.ID] = append(notifMap[n.Content.Webhook.ID], n) } for _, webhook := range webhookMap { + // todo: this has the potential to spin up thousands of go routines + // should use an errgroup instead if we decide to keep the aproach go func(webhook types.UserWebhook, reqs []types.TransitDiscord) { defer func() { // update retries counters in db based on end result - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = $1, last_sent = now() WHERE id = $2;`, webhook.Retries, webhook.ID) + _, err = 
db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = $1, last_sent = now() WHERE id = $2;`, webhook.Retries, webhook.ID) if err != nil { log.Warnf("failed to update retries counter to %v for webhook %v: %v", webhook.Retries, webhook.ID, err) } @@ -1126,7 +1123,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { for _, req := range reqs { ids = append(ids, req.Id) } - _, err = db.FrontendWriterDB.Exec(`UPDATE notification_queue SET sent = now() where id = ANY($1)`, pq.Array(ids)) + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() where id = ANY($1)`, pq.Array(ids)) if err != nil { log.Warnf("failed to update sent for notifcations in queue: %v", err) } @@ -1173,14 +1170,15 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { } errResp.Status = resp.Status resp.Body.Close() - } - if strings.Contains(errResp.Body, "You are being rate limited") { - log.Warnf("could not push to discord webhook due to rate limit. %v url: %v", errResp.Body, webhook.Url) - } else { - log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) + if resp.StatusCode == http.StatusTooManyRequests { + log.Warnf("could not push to discord webhook due to rate limit. 
%v url: %v", errResp.Body, webhook.Url) + } else { + log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) + } } - _, err = useDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) + + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) if err != nil { log.Error(err, "error storing failure data in users_webhooks table", 0) } @@ -1198,7 +1196,7 @@ func getUrlPart(validatorIndex uint64) string { return fmt.Sprintf(` For more information visit: https://%s/validator/%v.`, utils.Config.Frontend.SiteDomain, validatorIndex, utils.Config.Frontend.SiteDomain, validatorIndex) } -func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, status uint64, eventName types.EventName, epoch uint64) error { +func collectBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, status uint64, eventName types.EventName, epoch uint64) error { type dbResult struct { Proposer uint64 `db:"proposer"` Status uint64 `db:"status"` @@ -1207,7 +1205,7 @@ func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[type ExecRewardETH float64 } - _, subMap, err := GetSubsForEventFilter(eventName) + subMap, err := GetSubsForEventFilter(eventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) } @@ -1280,22 +1278,19 @@ func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[type } log.Infof("creating %v notification for validator %v in epoch %v", eventName, event.Proposer, epoch) n := &validatorProposalNotification{ - SubscriptionID: *sub.ID, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + 
EventName: sub.EventName, + EventFilter: hex.EncodeToString(pubkey), + }, ValidatorIndex: event.Proposer, - Epoch: epoch, Status: event.Status, - EventName: eventName, Reward: event.ExecRewardETH, - EventFilter: hex.EncodeToString(pubkey), Slot: event.Slot, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1304,43 +1299,12 @@ func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[type } type validatorProposalNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - ValidatorPublicKey string - Epoch uint64 - Slot uint64 - Status uint64 // * Can be 0 = scheduled, 1 executed, 2 missed */ - EventName types.EventName - EventFilter string - Reward float64 - UnsubscribeHash sql.NullString -} - -func (n *validatorProposalNotification) GetLatestState() string { - return "" -} - -func (n *validatorProposalNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorProposalNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} + types.NotificationBaseImpl -func (n *validatorProposalNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorProposalNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *validatorProposalNotification) GetEventName() types.EventName { - return n.EventName + ValidatorIndex uint64 + Slot uint64 + Status uint64 // * Can be 0 = scheduled, 1 executed, 2 
missed */ + Reward float64 } func (n *validatorProposalNotification) GetInfo(includeUrl bool) string { @@ -1359,6 +1323,8 @@ func (n *validatorProposalNotification) GetInfo(includeUrl bool) string { generalPart = fmt.Sprintf(`Validator %s proposed block at slot %s with %v %v execution reward.`, vali, slot, n.Reward, utils.Config.Frontend.ElCurrency) case 2: generalPart = fmt.Sprintf(`Validator %s missed a block proposal at slot %s.`, vali, slot) + case 3: + generalPart = fmt.Sprintf(`Validator %s had an orphaned block proposal at slot %s.`, vali, slot) } return generalPart + suffix } @@ -1371,14 +1337,12 @@ func (n *validatorProposalNotification) GetTitle() string { return "New Block Proposal" case 2: return "Block Proposal Missed" + case 3: + return "Block Proposal Missed (Orphaned)" } return "-" } -func (n *validatorProposalNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *validatorProposalNotification) GetInfoMarkdown() string { var generalPart = "" switch n.Status { @@ -1388,13 +1352,17 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) proposed a new block at slot [%[3]v](https://%[1]v/slot/%[3]v) with %[4]v %[5]v execution reward.`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot, n.Reward, utils.Config.Frontend.ElCurrency) case 2: generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) missed a block proposal at slot [%[3]v](https://%[1]v/slot/%[3]v).`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot) + case 3: + generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) had an orphaned block proposal at slot [%[3]v](https://%[1]v/slot/%[3]v).`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot) } return generalPart } -func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { - 
_, subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) +// collectAttestationAndOfflineValidatorNotifications collects notifications for missed attestations and offline validators +func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + // Retrieve subscriptions for missed attestations + subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) } @@ -1407,12 +1375,13 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma } // get attestations for all validators for the last 4 epochs - + // we need 4 epochs so that can detect the online / offline status of validators validators, err := db.GetValidatorIndices() if err != nil { return err } + // this reads the submitted attestations for the last 4 epochs participationPerEpoch, err := db.GetValidatorAttestationHistoryForNotifications(epoch-3, epoch) if err != nil { return fmt.Errorf("error getting validator attestations from db %w", err) @@ -1470,29 +1439,17 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorMissedAttestationEventName, event.ValidatorIndex, event.Epoch) n := &validatorAttestationNotification{ - SubscriptionID: *sub.ID, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: event.Epoch, + EventName: sub.EventName, + EventFilter: hex.EncodeToString(event.EventFilter), + }, ValidatorIndex: event.ValidatorIndex, - Epoch: event.Epoch, Status: event.Status, - EventName: types.ValidatorMissedAttestationEventName, - EventFilter: hex.EncodeToString(event.EventFilter), } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = 
map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - } - } - if isDuplicate { - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1573,7 +1530,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma return fmt.Errorf("retrieved more than %v online validators notifications: %v, exiting", onlineValidatorsLimit, len(onlineValidators)) } - _, subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName) + subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName, "", nil, nil) if err != nil { return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorIsOfflineEventName, err) } @@ -1587,34 +1544,20 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma } log.Infof("new event: validator %v detected as offline since epoch %v", validator.Index, epoch) - n := validatorIsOfflineNotification{ - SubscriptionID: *sub.ID, + n := &validatorIsOfflineNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + Epoch: epoch, + EventName: sub.EventName, + LatestState: fmt.Sprint(epoch - 2), // first epoch the validator stopped attesting + EventFilter: hex.EncodeToString(validator.Pubkey), + UserID: *sub.UserID, + }, ValidatorIndex: validator.Index, IsOffline: true, - EventEpoch: epoch, - EventName: types.ValidatorIsOfflineEventName, - InternalState: fmt.Sprint(epoch - 2), // first epoch the validator stopped 
attesting - EventFilter: hex.EncodeToString(validator.Pubkey), } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - break - } - } - if isDuplicate { - log.Infof("duplicate offline notification detected") - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], &n) + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1623,21 +1566,21 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma t := hex.EncodeToString(validator.Pubkey) subs := subMap[t] for _, sub := range subs { - if sub.State.String == "" || sub.State.String == "-" { // discard online notifications that do not have a corresponding offline notification - continue - } + // if sub.State.String == "" || sub.State.String == "-" { // discard online notifications that do not have a corresponding offline notification + // continue + // } - originalLastSeenEpoch, err := strconv.ParseUint(sub.State.String, 10, 64) - if err != nil { - // i have no idea what just happened. - return fmt.Errorf("this should never happen. couldn't parse state as uint64: %v", err) - } + // originalLastSeenEpoch, err := strconv.ParseUint(sub.State.String, 10, 64) + // if err != nil { + // // I have no idea what just happened. + // return fmt.Errorf("this should never happen. 
couldn't parse state as uint64: %v", err) + // } - epochsSinceOffline := epoch - originalLastSeenEpoch + // epochsSinceOffline := epoch - originalLastSeenEpoch - if epochsSinceOffline > epoch { // fix overflow - epochsSinceOffline = 4 - } + // if epochsSinceOffline > epoch { // fix overflow + // epochsSinceOffline = 4 + // } if sub.UserID == nil || sub.ID == nil { return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) @@ -1645,35 +1588,20 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) - n := validatorIsOfflineNotification{ - SubscriptionID: *sub.ID, + n := &validatorIsOfflineNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventName: sub.EventName, + EventFilter: hex.EncodeToString(validator.Pubkey), + LatestState: "-", + }, ValidatorIndex: validator.Index, IsOffline: false, - EventEpoch: epoch, - EventName: types.ValidatorIsOfflineEventName, - InternalState: "-", - EventFilter: hex.EncodeToString(validator.Pubkey), - EpochsOffline: epochsSinceOffline, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - break - } - } - if isDuplicate { - log.Infof("duplicate online notification detected") - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], &n) + 
notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1682,45 +1610,25 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma } type validatorIsOfflineNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - EventEpoch uint64 - EpochsOffline uint64 - IsOffline bool - EventName types.EventName - EventFilter string - UnsubscribeHash sql.NullString - InternalState string -} + types.NotificationBaseImpl -func (n *validatorIsOfflineNotification) GetLatestState() string { - return n.InternalState -} - -func (n *validatorIsOfflineNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorIsOfflineNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorIsOfflineNotification) GetEpoch() uint64 { - return n.EventEpoch + ValidatorIndex uint64 + IsOffline bool } +// Overwrite specific methods func (n *validatorIsOfflineNotification) GetInfo(includeUrl bool) string { if n.IsOffline { if includeUrl { - return fmt.Sprintf(`Validator %[1]v is offline since epoch %[2]s).`, n.ValidatorIndex, n.InternalState, utils.Config.Frontend.SiteDomain) + return fmt.Sprintf(`Validator %[1]v is offline since epoch %[2]s).`, n.ValidatorIndex, n.LatestState, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.InternalState) + return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.LatestState) } } else { if includeUrl { - return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) + return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v.`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator %v is back online since epoch %v 
(was offline for %v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, n.EpochsOffline) + return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch) } } } @@ -1733,54 +1641,24 @@ func (n *validatorIsOfflineNotification) GetTitle() string { } } -func (n *validatorIsOfflineNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorIsOfflineNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorIsOfflineNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - func (n *validatorIsOfflineNotification) GetInfoMarkdown() string { if n.IsOffline { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is offline since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain) + return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is offline since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch [%[2]v](https://%[3]v/epoch/%[2]v) (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) + return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) } } +func (n *validatorIsOfflineNotification) GetEventName() types.EventName { + return types.ValidatorIsOfflineEventName +} + type validatorAttestationNotification struct { - SubscriptionID uint64 + types.NotificationBaseImpl + ValidatorIndex uint64 ValidatorPublicKey string - Epoch uint64 Status uint64 // * Can be 0 = scheduled | missed, 1 executed - EventName types.EventName - EventFilter string - UnsubscribeHash 
sql.NullString -} - -func (n *validatorAttestationNotification) GetLatestState() string { - return "" -} - -func (n *validatorAttestationNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorAttestationNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorAttestationNotification) GetEpoch() uint64 { - return n.Epoch } func (n *validatorAttestationNotification) GetInfo(includeUrl bool) string { @@ -1814,21 +1692,6 @@ func (n *validatorAttestationNotification) GetTitle() string { return "-" } -func (n *validatorAttestationNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorAttestationNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorAttestationNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - func (n *validatorAttestationNotification) GetInfoMarkdown() string { var generalPart = "" switch n.Status { @@ -1840,41 +1703,16 @@ func (n *validatorAttestationNotification) GetInfoMarkdown() string { return generalPart } -type validatorGotSlashedNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - Epoch uint64 - Slasher uint64 - Reason string - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorGotSlashedNotification) GetLatestState() string { - return "" -} - -func (n *validatorGotSlashedNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorGotSlashedNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorGotSlashedNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID +func (n *validatorAttestationNotification) GetEventName() types.EventName { + return types.ValidatorMissedAttestationEventName } -func (n *validatorGotSlashedNotification) GetEpoch() 
uint64 { - return n.Epoch -} +type validatorGotSlashedNotification struct { + types.NotificationBaseImpl -func (n *validatorGotSlashedNotification) GetEventName() types.EventName { - return types.ValidatorGotSlashedEventName + ValidatorIndex uint64 + Slasher uint64 + Reason string } func (n *validatorGotSlashedNotification) GetInfo(includeUrl bool) string { @@ -1889,8 +1727,8 @@ func (n *validatorGotSlashedNotification) GetTitle() string { return "Validator got Slashed" } -func (n *validatorGotSlashedNotification) GetEventFilter() string { - return n.EventFilter +func (n *validatorGotSlashedNotification) GetEventName() types.EventName { + return types.ValidatorGotSlashedEventName } func (n *validatorGotSlashedNotification) GetInfoMarkdown() string { @@ -1898,100 +1736,61 @@ func (n *validatorGotSlashedNotification) GetInfoMarkdown() string { return generalPart } -func collectValidatorGotSlashedNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectValidatorGotSlashedNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { dbResult, err := db.GetValidatorsGotSlashed(epoch) if err != nil { return fmt.Errorf("error getting slashed validators from database, err: %w", err) } - query := "" - resultsLen := len(dbResult) - for i, event := range dbResult { - query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, ENCODE(unsubscribe_hash, 'hex') AS unsubscribe_hash from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) - if i < resultsLen-1 { - query += " UNION " - } - } - - if query == "" { - return nil - } - - var subscribers []struct { - Ref uint64 `db:"ref"` - Id uint64 `db:"id"` - UserId uint64 `db:"user_id"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` + slashedPubkeys := make([]string, 0, len(dbResult)) + pubkeyToSlashingInfoMap := make(map[string]*types.SlashingInfo) + for _, event := range dbResult { + 
pubkeyStr := hex.EncodeToString(event.SlashedValidatorPubkey) + slashedPubkeys = append(slashedPubkeys, pubkeyStr) + pubkeyToSlashingInfoMap[pubkeyStr] = event } - name := string(types.ValidatorGotSlashedEventName) - if utils.Config.Chain.ClConfig.ConfigName != "" { - name = utils.Config.Chain.ClConfig.ConfigName + ":" + name - } - err = db.FrontendWriterDB.Select(&subscribers, query, name) + subscribedUsers, err := GetSubsForEventFilter(types.ValidatorGotSlashedEventName, "", nil, slashedPubkeys) if err != nil { - return fmt.Errorf("error querying subscribers, err: %w", err) + return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorGotSlashedEventName, err) } - for _, sub := range subscribers { - event := dbResult[sub.Ref] - - log.Infof("creating %v notification for validator %v in epoch %v", event.SlashedValidatorPubkey, event.Reason, epoch) - - n := &validatorGotSlashedNotification{ - SubscriptionID: sub.Id, - Slasher: event.SlasherIndex, - Epoch: event.Epoch, - Reason: event.Reason, - ValidatorIndex: event.SlashedValidatorIndex, - EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), - UnsubscribeHash: sub.UnsubscribeHash, - } - - if _, exists := notificationsByUserID[sub.UserId]; !exists { - notificationsByUserID[sub.UserId] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[sub.UserId][n.GetEventName()]; !exists { - notificationsByUserID[sub.UserId][n.GetEventName()] = []types.Notification{} + for _, subs := range subscribedUsers { + for _, sub := range subs { + event := pubkeyToSlashingInfoMap[sub.EventFilter] + if event == nil { // pubkey has not been slashed + //log.Error(fmt.Errorf("error retrieving slashing info for public key %s", sub.EventFilter), "", 0) + continue + } + log.Infof("creating %v notification for validator %v in epoch %v", event.Reason, sub.EventFilter, epoch) + + n := &validatorGotSlashedNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + 
UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + Slasher: event.SlasherIndex, + Reason: event.Reason, + ValidatorIndex: event.SlashedValidatorIndex, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[sub.UserId][n.GetEventName()] = append(notificationsByUserID[sub.UserId][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } return nil } type validatorWithdrawalNotification struct { - SubscriptionID uint64 - ValidatorIndex uint64 - Epoch uint64 - Slot uint64 - Amount uint64 - Address []byte - EventFilter string - UnsubscribeHash sql.NullString -} + types.NotificationBaseImpl -func (n *validatorWithdrawalNotification) GetLatestState() string { - return "" -} - -func (n *validatorWithdrawalNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorWithdrawalNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorWithdrawalNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorWithdrawalNotification) GetEpoch() uint64 { - return n.Epoch + ValidatorIndex uint64 + Epoch uint64 + Slot uint64 + Amount uint64 + Address []byte } func (n *validatorWithdrawalNotification) GetEventName() types.EventName { @@ -2010,19 +1809,15 @@ func (n *validatorWithdrawalNotification) GetTitle() string { return "Withdrawal Processed" } -func (n *validatorWithdrawalNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { generalPart := fmt.Sprintf(`An automatic withdrawal of %[2]v has been processed for validator [%[1]v](https://%[6]v/validator/%[1]v) during slot [%[3]v](https://%[6]v/slot/%[3]v). 
The funds have been sent to: [%[4]v](https://%[6]v/address/0x%[5]x).`, n.ValidatorIndex, utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false), n.Slot, utils.FormatHashRaw(n.Address), n.Address, utils.Config.Frontend.SiteDomain) return generalPart } // collectWithdrawalNotifications collects all notifications validator withdrawals -func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) - _, subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) + subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for missed attestations %w", err) } @@ -2049,22 +1844,19 @@ func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.E } // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) n := &validatorWithdrawalNotification{ - SubscriptionID: *sub.ID, - ValidatorIndex: event.ValidatorIndex, - Epoch: epoch, - Slot: event.Slot, - Amount: event.Amount, - Address: event.Address, - EventFilter: hex.EncodeToString(event.Pubkey), - UnsubscribeHash: sub.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + EventFilter: hex.EncodeToString(event.Pubkey), + EventName: sub.EventName, + }, + ValidatorIndex: event.ValidatorIndex, + Epoch: epoch, + Slot: event.Slot, + Amount: event.Amount, + Address: event.Address, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = 
map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2074,35 +1866,9 @@ func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.E } type ethClientNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EthClient string - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *ethClientNotification) GetLatestState() string { - return "" -} - -func (n *ethClientNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *ethClientNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *ethClientNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} + types.NotificationBaseImpl -func (n *ethClientNotification) GetEpoch() uint64 { - return n.Epoch + EthClient string } func (n *ethClientNotification) GetEventName() types.EventName { @@ -2147,10 +1913,6 @@ func (n *ethClientNotification) GetTitle() string { return fmt.Sprintf("New %s update", n.EthClient) } -func (n *ethClientNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *ethClientNotification) GetInfoMarkdown() string { url := "" switch n.EthClient { @@ -2183,50 +1945,45 @@ func (n *ethClientNotification) GetInfoMarkdown() string { return generalPart } -func collectEthClientNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId) error { updatedClients := 
ethclients.GetUpdatedClients() //only check if there are new updates for _, client := range updatedClients { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE - us.event_name=$1 - AND - us.event_filter=$2 - AND - ((us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP($3) > us.last_sent_ts) OR us.last_sent_ts IS NULL) - `, - eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this client - + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE + // us.event_name=$1 + // AND + // us.event_filter=$2 + // AND + // ((us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP($3) > us.last_sent_ts) OR us.last_sent_ts IS NULL) + // `, + // eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this client + + dbResult, err := GetSubsForEventFilter( + types.EthClientUpdateEventName, + "((last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) 
> last_sent_ts) OR last_sent_ts IS NULL)", + []interface{}{client.Date.Unix()}, + []string{strings.ToLower(client.Name)}) if err != nil { return err } - for _, r := range dbResult { - n := ðClientNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EthClient: client.Name, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + for _, subs := range dbResult { + for _, sub := range subs { + n := ðClientNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + EthClient: client.Name, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } return nil @@ -2234,17 +1991,17 @@ func collectEthClientNotifications(notificationsByUserID map[uint64]map[types.Ev type MachineEvents struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` MachineName string `db:"machine"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` EventThreshold float64 `db:"event_threshold"` } -func collectMonitoringMachineOffline(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineOffline(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { nowTs := time.Now().Unix() return 
collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineOfflineEventName, 120, // notify condition - func(_ *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if machineData.CurrentDataInsertTs < nowTs-10*60 && machineData.CurrentDataInsertTs > nowTs-90*60 { return true } @@ -2259,10 +2016,10 @@ func isMachineDataRecent(machineData *types.MachineMetricSystemUser) bool { return machineData.CurrentDataInsertTs >= nowTs-60*60 } -func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineDiskAlmostFull(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineDiskAlmostFullEventName, 750, // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if !isMachineDataRecent(machineData) { return false } @@ -2274,10 +2031,10 @@ func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[uint64]map ) } -func collectMonitoringMachineCPULoad(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineCPULoad(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineCpuLoadEventName, 10, // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if !isMachineDataRecent(machineData) { return false } @@ -2296,10 +2053,10 @@ func collectMonitoringMachineCPULoad(notificationsByUserID map[uint64]map[types. 
) } -func collectMonitoringMachineMemoryUsage(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineMemoryUsage(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineMemoryUsageEventName, 10, // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if !isMachineDataRecent(machineData) { return false } @@ -2317,32 +2074,45 @@ func collectMonitoringMachineMemoryUsage(notificationsByUserID map[uint64]map[ty var isFirstNotificationCheck = true func collectMonitoringMachine( - notificationsByUserID map[uint64]map[types.EventName][]types.Notification, + notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epochWaitInBetween int, - notifyConditionFulfilled func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool, + notifyConditionFulfilled func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool, epoch uint64, ) error { - var allSubscribed []MachineEvents - err := db.FrontendWriterDB.Select(&allSubscribed, - `SELECT - us.user_id, - max(us.id) AS id, - ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, - event_filter AS machine, - COALESCE(event_threshold, 0) AS event_threshold - FROM users_subscriptions us - WHERE us.event_name = $1 AND us.created_epoch <= $2 - AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) - group by us.user_id, machine, event_threshold`, - eventName, epoch, epochWaitInBetween) + // event_filter == machine name + + dbResult, err := GetSubsForEventFilter( + eventName, + "(created_epoch <= ? 
AND (last_sent_epoch < (?::int - ?::int) OR last_sent_epoch IS NULL))", // ::int is required here otherwise the generated goqu query throw an error + []interface{}{epoch, epoch, epochWaitInBetween}, + nil, + ) + + // TODO: clarify why we need grouping here?! + // err := db.FrontendWriterDB.Select(&allSubscribed, + // `SELECT + // us.user_id, + // max(us.id) AS id, + // ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, + // event_filter, + // COALESCE(event_threshold, 0) AS event_threshold + // FROM users_subscriptions us + // WHERE us.event_name = $1 AND us.created_epoch <= $2 + // AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) + // group by us.user_id, event_filter, event_threshold`, + // eventName, epoch, epochWaitInBetween) if err != nil { return err } rowKeys := gcp_bigtable.RowList{} - for _, data := range allSubscribed { - rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(data.UserID, "system", data.MachineName)) + totalSubscribed := 0 + for _, data := range dbResult { + for _, sub := range data { + rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(*sub.UserID, "system", sub.EventFilter)) + totalSubscribed++ + } } machineDataOfSubscribed, err := db.BigtableClient.GetMachineMetricsForNotifications(rowKeys) @@ -2350,21 +2120,23 @@ func collectMonitoringMachine( return err } - var result []MachineEvents - for _, data := range allSubscribed { - localData := data // Create a local copy of the data variable - machineMap, found := machineDataOfSubscribed[localData.UserID] - if !found { - continue - } - currentMachineData, found := machineMap[localData.MachineName] - if !found { - continue - } + var result []*types.Subscription + for _, data := range dbResult { + for _, sub := range data { + localData := sub // Create a local copy of the data variable + machineMap, found := machineDataOfSubscribed[*localData.UserID] + if !found { + continue + } + currentMachineData, found := 
machineMap[localData.EventFilter] + if !found { + continue + } - //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) - if notifyConditionFulfilled(&localData, currentMachineData) { - result = append(result, localData) + //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) + if notifyConditionFulfilled(&localData, currentMachineData) { + result = append(result, &localData) + } } } @@ -2402,7 +2174,7 @@ func collectMonitoringMachine( subRatioThreshold = subFirstRatioThreshold isFirstNotificationCheck = false } - if float64(len(result))/float64(len(allSubscribed)) >= subRatioThreshold { + if float64(len(result))/float64(totalSubscribed) >= subRatioThreshold { log.Error(nil, fmt.Errorf("error too many users would be notified concerning: %v", eventName), 0) return nil } @@ -2410,21 +2182,17 @@ func collectMonitoringMachine( for _, r := range result { n := &monitorMachineNotification{ - SubscriptionID: r.SubscriptionID, - MachineName: r.MachineName, - UserID: r.UserID, - EventName: eventName, - Epoch: epoch, - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *r.ID, + UserID: *r.UserID, + EventName: r.EventName, + Epoch: epoch, + EventFilter: r.EventFilter, + }, + MachineName: r.EventFilter, } //logrus.Infof("notify %v %v", eventName, n) - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + 
notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -2437,39 +2205,9 @@ func collectMonitoringMachine( } type monitorMachineNotification struct { - SubscriptionID uint64 - MachineName string - UserID uint64 - Epoch uint64 - EventName types.EventName - UnsubscribeHash sql.NullString -} - -func (n *monitorMachineNotification) GetLatestState() string { - return "" -} - -func (n *monitorMachineNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *monitorMachineNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *monitorMachineNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} + types.NotificationBaseImpl -func (n *monitorMachineNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *monitorMachineNotification) GetEventName() types.EventName { - return n.EventName + MachineName string } func (n *monitorMachineNotification) GetInfo(includeUrl bool) string { @@ -2480,10 +2218,6 @@ func (n *monitorMachineNotification) GetInfo(includeUrl bool) string { return fmt.Sprintf(`Your staking machine "%v" might be offline. 
It has not been seen for a couple minutes now.`, n.MachineName) case types.MonitoringMachineCpuLoadEventName: return fmt.Sprintf(`Your staking machine "%v" has reached your configured CPU usage threshold.`, n.MachineName) - case types.MonitoringMachineSwitchedToETH1FallbackEventName: - return fmt.Sprintf(`Your staking machine "%v" has switched to your configured ETH1 fallback`, n.MachineName) - case types.MonitoringMachineSwitchedToETH2FallbackEventName: - return fmt.Sprintf(`Your staking machine "%v" has switched to your configured ETH2 fallback`, n.MachineName) case types.MonitoringMachineMemoryUsageEventName: return fmt.Sprintf(`Your staking machine "%v" has reached your configured RAM threshold.`, n.MachineName) } @@ -2498,10 +2232,6 @@ func (n *monitorMachineNotification) GetTitle() string { return "Staking Machine Offline" case types.MonitoringMachineCpuLoadEventName: return "High CPU Load" - case types.MonitoringMachineSwitchedToETH1FallbackEventName: - return "ETH1 Fallback Active" - case types.MonitoringMachineSwitchedToETH2FallbackEventName: - return "ETH2 Fallback Active" case types.MonitoringMachineMemoryUsageEventName: return "Memory Warning" } @@ -2517,22 +2247,7 @@ func (n *monitorMachineNotification) GetInfoMarkdown() string { } type taxReportNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *taxReportNotification) GetLatestState() string { - return "" -} - -func (n *taxReportNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" + types.NotificationBaseImpl } func (n *taxReportNotification) GetEmailAttachment() *types.EmailAttachment { @@ -2569,14 +2284,6 @@ func (n *taxReportNotification) GetEmailAttachment() *types.EmailAttachment { return &types.EmailAttachment{Attachment: pdf, Name: fmt.Sprintf("income_history_%v_%v.pdf", firstDay.Format("20060102"), lastDay.Format("20060102"))} } 
-func (n *taxReportNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *taxReportNotification) GetEpoch() uint64 { - return n.Epoch -} - func (n *taxReportNotification) GetEventName() types.EventName { return types.TaxReportEventName } @@ -2598,7 +2305,7 @@ func (n *taxReportNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectTaxReportNotificationNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectTaxReportNotificationNotifications(notificationsByUserID types.NotificationsPerUserId) error { lastStatsDay, err := cache.LatestExportedStatisticDay.GetOrDefault(db.GetLastExportedStatisticDay) if err != nil { @@ -2611,80 +2318,44 @@ func collectTaxReportNotificationNotifications(notificationsByUserID map[uint64] return nil } - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - name := string(eventName) - if utils.Config.Chain.ClConfig.ConfigName != "" { - name = utils.Config.Chain.ClConfig.ConfigName + ":" + name - } - - err = db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts < $2 OR (us.last_sent_ts IS NULL AND us.created_ts < $2)); - `, - name, firstDayOfMonth) + // err = db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts < $2 OR (us.last_sent_ts IS NULL AND us.created_ts < $2)); + // `, + // name, firstDayOfMonth) + dbResults, err := GetSubsForEventFilter( + 
types.TaxReportEventName, + "(last_sent_ts < ? OR (last_sent_ts IS NULL AND created_ts < ?))", + []interface{}{firstDayOfMonth, firstDayOfMonth}, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &taxReportNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + for _, subs := range dbResults { + for _, sub := range subs { + n := &taxReportNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } return nil } type networkNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *networkNotification) GetLatestState() string { - return "" -} - -func (n *networkNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *networkNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *networkNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *networkNotification) GetEpoch() uint64 { - return n.Epoch + types.NotificationBaseImpl } func (n 
*networkNotification) GetEventName() types.EventName { @@ -2700,16 +2371,12 @@ func (n *networkNotification) GetTitle() string { return "Beaconchain Network Issues" } -func (n *networkNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *networkNotification) GetInfoMarkdown() string { generalPart := fmt.Sprintf(`Network experienced finality issues ([view chart](https://%v/charts/network_liveness)).`, utils.Config.Frontend.SiteDomain) return generalPart } -func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUserId) error { count := 0 err := db.WriterDb.Get(&count, ` SELECT count(ts) FROM network_liveness WHERE (headepoch-finalizedepoch) > 3 AND ts > now() - interval '60 minutes'; @@ -2720,41 +2387,38 @@ func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.Even } if count > 0 { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL); - `, - utils.GetNetwork()+":"+string(eventName)) - + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL); + // `, + // utils.GetNetwork()+":"+string(eventName)) + + dbResult, err := 
GetSubsForEventFilter( + types.NetworkLivenessIncreasedEventName, + "(last_sent_ts <= NOW() - INTERVAL '1 hour' OR last_sent_ts IS NULL)", + nil, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &networkNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + for _, subs := range dbResult { + for _, sub := range subs { + n := &networkNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2762,40 +2426,8 @@ func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.Even } type rocketpoolNotification struct { - SubscriptionID uint64 - UserID uint64 - Epoch uint64 - EventFilter string - EventName types.EventName - ExtraData string - UnsubscribeHash sql.NullString -} - -func (n *rocketpoolNotification) GetLatestState() string { - return "" -} - -func (n *rocketpoolNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *rocketpoolNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *rocketpoolNotification) GetSubscriptionID() uint64 
{ - return n.SubscriptionID -} - -func (n *rocketpoolNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *rocketpoolNotification) GetEventName() types.EventName { - return n.EventName + types.NotificationBaseImpl + ExtraData string } func (n *rocketpoolNotification) GetInfo(includeUrl bool) string { @@ -2809,7 +2441,9 @@ func (n *rocketpoolNotification) GetInfo(includeUrl bool) string { case types.RocketpoolCollateralMinReached: return fmt.Sprintf(`Your RPL collateral has reached your configured threshold at %v%%.`, n.ExtraData) case types.SyncCommitteeSoon: - return getSyncCommitteeSoonInfo([]types.Notification{n}) + return getSyncCommitteeSoonInfo(map[types.EventFilter]types.Notification{ + types.EventFilter(n.EventFilter): n, + }) } return "" @@ -2831,15 +2465,11 @@ func (n *rocketpoolNotification) GetTitle() string { return "" } -func (n *rocketpoolNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *rocketpoolNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectRocketpoolComissionNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId) error { fee := 0.0 err := db.WriterDb.Get(&fee, ` select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; @@ -2850,50 +2480,46 @@ func collectRocketpoolComissionNotifications(notificationsByUserID map[uint64]ma } if fee > 0 { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND 
(us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= $2 OR (us.event_threshold < 0 AND us.event_threshold * -1 >= $2)); - `, - utils.GetNetwork()+":"+string(eventName), fee) - + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= $2 OR (us.event_threshold < 0 AND us.event_threshold * -1 >= $2)); + // `, + // utils.GetNetwork()+":"+string(eventName), fee) + + dbResult, err := GetSubsForEventFilter( + types.RocketpoolCommissionThresholdEventName, + "(last_sent_ts <= NOW() - INTERVAL '8 hours' OR last_sent_ts IS NULL) AND (event_threshold <= ? OR (event_threshold < 0 AND event_threshold * -1 >= ?))", + []interface{}{fee, fee}, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + for _, subs := range dbResult { + for _, sub := range subs { + n := &rocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", + } + + notificationsByUserID.AddNotification(n) + 
metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } return nil } -func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId) error { var ts int64 err := db.WriterDb.Get(&ts, ` select date_part('epoch', claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; @@ -2904,50 +2530,48 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[ui } if ts+3*60*60 > time.Now().Unix() { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL); - `, - utils.GetNetwork()+":"+string(eventName)) - + // var dbResult []*types.Subscription + + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL); + // `, + // utils.GetNetwork()+":"+string(eventName)) + + dbResult, err := GetSubsForEventFilter( + types.RocketpoolNewClaimRoundStartedEventName, + "(last_sent_ts <= NOW() - 
INTERVAL '5 hours' OR last_sent_ts IS NULL)", + nil, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + for _, subs := range dbResult { + for _, sub := range subs { + n := &rocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + } + + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } return nil } -func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { - pubkeys, subMap, err := GetSubsForEventFilter(eventName) +func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { + subMap, err := GetSubsForEventFilter(eventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) } @@ -2959,20 +2583,34 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint6 RPLStakeMax BigFloat `db:"max_rpl_stake"` } + // filter nodes with no minipools 
(anymore) because they have min/max stake of 0 + // TODO properly remove notification entry from db stakeInfoPerNode := make([]dbResult, 0) batchSize := 5000 - dataLen := len(pubkeys) - for i := 0; i < dataLen; i += batchSize { - var keys [][]byte - start := i - end := i + batchSize - - if dataLen < end { - end = dataLen + keys := make([][]byte, 0, batchSize) + for pubkey := range subMap { + b, err := hex.DecodeString(pubkey) + if err != nil { + log.Error(err, fmt.Sprintf("error decoding pubkey %s", pubkey), 0) + continue } + keys = append(keys, b) - keys = pubkeys[start:end] + if len(keys) > batchSize { + var partial []dbResult + err = db.WriterDb.Select(&partial, ` + SELECT address, rpl_stake, min_rpl_stake, max_rpl_stake + FROM rocketpool_nodes + WHERE address = ANY($1) AND min_rpl_stake != 0 AND max_rpl_stake != 0`, pq.ByteaArray(keys)) + if err != nil { + return err + } + stakeInfoPerNode = append(stakeInfoPerNode, partial...) + keys = make([][]byte, 0, batchSize) + } + } + if len(keys) > 0 { var partial []dbResult // filter nodes with no minipools (anymore) because they have min/max stake of 0 @@ -3048,21 +2686,17 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint6 } n := &rocketpoolNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: epoch, - EventFilter: sub.EventFilter, - EventName: eventName, - ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), - UnsubscribeHash: sub.UnsubscribeHash, - } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: 
sub.EventName, + }, + ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -3110,7 +2744,7 @@ func bigFloat(x float64) *big.Float { return new(big.Float).SetFloat64(x) } -func collectSyncCommittee(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { +func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee nextPeriod := currentPeriod + 1 @@ -3136,43 +2770,34 @@ func collectSyncCommittee(notificationsByUserID map[uint64]map[types.EventName][ pubKeys = append(pubKeys, val.PubKey) } - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err = db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL) AND event_filter = ANY($2); - `, - utils.GetNetwork()+":"+string(eventName), pq.StringArray(pubKeys), - ) + dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoon, "(last_sent_ts <= NOW() - INTERVAL '26 hours' OR last_sent_ts IS NULL)", nil, pubKeys) + // err = db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 
AND (us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL) AND event_filter = ANY($2); + // `, + // utils.GetNetwork()+":"+string(eventName), pq.StringArray(pubKeys), + // ) if err != nil { return err } - for _, r := range dbResult { - n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: epoch, - EventFilter: r.EventFilter, - EventName: eventName, - ExtraData: fmt.Sprintf("%v|%v|%v", mapping[r.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), - UnsubscribeHash: r.UnsubscribeHash, - } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + for _, subs := range dbResult { + for _, sub := range subs { + n := &rocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + ExtraData: fmt.Sprintf("%v|%v|%v", mapping[sub.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } return nil @@ -3188,7 +2813,7 @@ type WebhookQueue struct { LastTry time.Time `db:"last_try"` } -func getEventInfo(event types.EventName, ns []types.Notification) string { +func getEventInfo(event types.EventName, ns 
map[types.EventFilter]types.Notification) string { switch event { case types.SyncCommitteeSoon: return getSyncCommitteeSoonInfo(ns) @@ -3199,12 +2824,13 @@ func getEventInfo(event types.EventName, ns []types.Notification) string { return "" } -func getSyncCommitteeSoonInfo(ns []types.Notification) string { +func getSyncCommitteeSoonInfo(ns map[types.EventFilter]types.Notification) string { validators := []string{} var startEpoch, endEpoch string var inTime time.Duration - for i, n := range ns { + i := 0 + for _, n := range ns { n, ok := n.(*rocketpoolNotification) if !ok { log.Error(nil, "Sync committee notification not of type rocketpoolNotification", 0) @@ -3230,6 +2856,7 @@ func getSyncCommitteeSoonInfo(ns []types.Notification) string { } inTime = inTime.Round(time.Second) } + i++ } if len(validators) > 0 { diff --git a/backend/pkg/userservice/appsubscription_oracle.go b/backend/pkg/userservice/appsubscription_oracle.go index 0f50ed916..7dbf2e1a7 100644 --- a/backend/pkg/userservice/appsubscription_oracle.go +++ b/backend/pkg/userservice/appsubscription_oracle.go @@ -11,6 +11,8 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/metrics" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" @@ -37,18 +39,21 @@ func CheckMobileSubscriptions() { receipts, err := db.GetAllAppSubscriptions() if err != nil { + metrics.Errors.WithLabelValues("appsub_verify_db_failed").Inc() log.Error(err, "error retrieving subscription data from db: %v", 0, nil) return } googleClient, err := initGoogle() if googleClient == nil { + metrics.Errors.WithLabelValues("appsub_verify_initgoogle_failed").Inc() log.Error(err, "error initializing google client: %v", 0, nil) return } appleClient, err := initApple() if err != nil { + 
metrics.Errors.WithLabelValues("appsub_verify_initapple_failed").Inc() log.Error(err, "error initializing apple client: %v", 0, nil) return } @@ -71,11 +76,14 @@ func CheckMobileSubscriptions() { if strings.Contains(err.Error(), "expired") { err = db.SetSubscriptionToExpired(nil, receipt.ID) if err != nil { + metrics.Errors.WithLabelValues("appsub_verify_write_failed").Inc() log.Error(err, "subscription set expired failed", 0, map[string]interface{}{"receiptID": receipt.ID}) } continue } log.Warnf("subscription verification failed in service for [%v]: %v", receipt.ID, err) + metrics.Errors.WithLabelValues(fmt.Sprintf("appsub_verify_%s_failed", receipt.Store)).Inc() + continue } @@ -86,6 +94,8 @@ func CheckMobileSubscriptions() { updateValidationState(receipt, valid) } + services.ReportStatus("app_subscriptions_check", "Running", nil) + log.InfoWithFields(log.Fields{"subscriptions": len(receipts), "duration": time.Since(start)}, "subscription update completed") time.Sleep(time.Hour * 4) } diff --git a/backend/pkg/userservice/stripe_email_updater.go b/backend/pkg/userservice/stripe_email_updater.go index 2f790e20f..f539c8bd6 100644 --- a/backend/pkg/userservice/stripe_email_updater.go +++ b/backend/pkg/userservice/stripe_email_updater.go @@ -10,6 +10,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) @@ -50,6 +51,8 @@ func StripeEmailUpdater() { } } + services.ReportStatus("stripe_email_updater", "Running", nil) + time.Sleep(time.Minute) } } diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index a97dbcf2a..5d7d6f16c 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -1,17 +1,32 @@ { "conventionalCommits.scopes": [ + "BcButton", + "BcLink", + "BcTablePager", + "DashboardChartSummaryChartFilter", + 
"DashboardGroupManagementModal", + "DashboardValidatorManagmentModal", + "NotificationsClientsTable", + "NotificationsNetworkTable", + "NotificationsOverview", + "a11y", "checkout", "ci", + "csrf", "customFetch", - "DashboardChartSummaryChartFilter", - "DashboardGroupManagementModal", + "dialog", "eslint", + "feature-flags", "git", + "git-blame-ignore", "i18n", "mainHeader", + "notifications", + "nuxt.config", "qrCode", - "vscode", - "DashboardValidatorManagmentModal" + "useNotificationsOverviewStore", + "useWindowSize", + "vscode" ], "editor.codeActionsOnSave": { "source.fixAll.eslint": "always" diff --git a/frontend/assets/css/_breakpoints.scss b/frontend/assets/css/_breakpoints.scss new file mode 100644 index 000000000..82403ddf1 --- /dev/null +++ b/frontend/assets/css/_breakpoints.scss @@ -0,0 +1,5 @@ +$breakpoint-sm: 640px; +$breakpoint-md: 768px; +$breakpoint-lg: 1024px; +$breakpoint-xl: 1280px; +$breakpoint-2xl: 1536px; \ No newline at end of file diff --git a/frontend/assets/css/main.scss b/frontend/assets/css/main.scss index e551aa1fd..f441fae57 100644 --- a/frontend/assets/css/main.scss +++ b/frontend/assets/css/main.scss @@ -96,16 +96,3 @@ svg { } } } - -// see https://tailwindcss.com/docs/screen-readers -sr-only { - position: absolute; - width: 1px; - height: 1px; - padding: 0; - margin: -1px; - overflow: hidden; - clip: rect(0, 0, 0, 0); - white-space: nowrap; - border-width: 0; -} diff --git a/frontend/assets/css/prime.scss b/frontend/assets/css/prime.scss index 1e80ebaa3..2816bfd93 100644 --- a/frontend/assets/css/prime.scss +++ b/frontend/assets/css/prime.scss @@ -634,11 +634,11 @@ div.p-accordion { * TODO: remove the .p-overflow-hidden and .p-overlay-mask class when PrimeVue is updated. * This is quick-fix for shifting display issues. 
**/ - .p-overflow-hidden { + div.p-overflow-hidden { overflow: hidden !important; /* Block scroll */ border-right: solid 5px transparent !important; } -.p-overlay-mask { - background: var(--container-background); -} +div.p-dialog-mask.p-overlay-mask { + background: rgba(0, 0, 0, 0.5); +} \ No newline at end of file diff --git a/frontend/components/BcFeatureFlag.vue b/frontend/components/BcFeatureFlag.vue new file mode 100644 index 000000000..5d69eb435 --- /dev/null +++ b/frontend/components/BcFeatureFlag.vue @@ -0,0 +1,14 @@ + + + + + diff --git a/frontend/components/bc/BcButton.vue b/frontend/components/bc/BcButton.vue index 1e159f2f6..829b6e367 100644 --- a/frontend/components/bc/BcButton.vue +++ b/frontend/components/bc/BcButton.vue @@ -39,7 +39,7 @@ const shouldAppearDisabled = computed( diff --git a/frontend/components/bc/BcContentFilter.vue b/frontend/components/bc/BcContentFilter.vue index edd6e4038..53e3a67cb 100644 --- a/frontend/components/bc/BcContentFilter.vue +++ b/frontend/components/bc/BcContentFilter.vue @@ -58,9 +58,9 @@ const handleClick = () => { :class="{ filter_visible: isFilterVisible }" @click="handleClick" > - + {{ !isFilterVisible ? $t('filter.open') : $t('filter.close') }} - + diff --git a/frontend/components/bc/BcLink.vue b/frontend/components/bc/BcLink.vue index b33af7173..6a27c8630 100644 --- a/frontend/components/bc/BcLink.vue +++ b/frontend/components/bc/BcLink.vue @@ -1,7 +1,16 @@ - +