diff --git a/.github/workflows/backend-converted-types-check.yml b/.github/workflows/backend-converted-types-check.yml new file mode 100644 index 000000000..a59593a02 --- /dev/null +++ b/.github/workflows/backend-converted-types-check.yml @@ -0,0 +1,47 @@ +name: Backend-Converted-Types-Check +on: + push: + paths: + - 'backend/pkg/api/types/**' + - 'frontend/types/api/**' + branches: + - main + - staging + pull_request: + paths: + - 'backend/pkg/api/types/**' + - 'frontend/types/api/**' + branches: + - '*' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + checks: write + +jobs: + build: + name: converted-types-check + runs-on: self-hosted + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version-file: 'backend/go.mod' + cache-dependency-path: 'backend/go.sum' + - name: Check if all backend-types have been converted to frontend-types + working-directory: backend + run: | + currHash=$(find ../frontend/types/api -type f -print0 | sort -z | xargs -0 sha1sum | sha256sum | head -c 64) + make frontend-types + newHash=$(find ../frontend/types/api -type f -print0 | sort -z | xargs -0 sha1sum | sha256sum | head -c 64) + if [ "$currHash" != "$newHash" ]; then + echo "frontend-types have changed, please commit the changes" + git diff --stat + exit 1 + fi + diff --git a/backend/cmd/exporter/main.go b/backend/cmd/exporter/main.go index 2bf0a014e..6f3cf7c72 100644 --- a/backend/cmd/exporter/main.go +++ b/backend/cmd/exporter/main.go @@ -193,7 +193,19 @@ func Run() { go services.StartHistoricPriceService() } - go modules.StartAll(context) + usedModules := []modules.ModuleInterface{} + + if cfg.JustV2 { + usedModules = append(usedModules, modules.NewDashboardDataModule(context)) + } else { + usedModules = append(usedModules, + modules.NewSlotExporter(context), + modules.NewExecutionDepositsExporter(context), + 
modules.NewExecutionPayloadsExporter(context), + ) + } + + go modules.StartAll(context, usedModules, cfg.JustV2) // Keep the program alive until Ctrl+C is pressed utils.WaitForCtrlC() diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index 215c5d01c..c567a0371 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -247,27 +247,27 @@ func Run() { } // clickhouse - // db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ - // Username: cfg.ClickHouse.WriterDatabase.Username, - // Password: cfg.ClickHouse.WriterDatabase.Password, - // Name: cfg.ClickHouse.WriterDatabase.Name, - // Host: cfg.ClickHouse.WriterDatabase.Host, - // Port: cfg.ClickHouse.WriterDatabase.Port, - // MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, - // SSL: true, - // MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, - // }, &types.DatabaseConfig{ - // Username: cfg.ClickHouse.ReaderDatabase.Username, - // Password: cfg.ClickHouse.ReaderDatabase.Password, - // Name: cfg.ClickHouse.ReaderDatabase.Name, - // Host: cfg.ClickHouse.ReaderDatabase.Host, - // Port: cfg.ClickHouse.ReaderDatabase.Port, - // MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, - // SSL: true, - // MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, - // }, "clickhouse", "clickhouse") - // defer db.ClickHouseReader.Close() - // defer db.ClickHouseWriter.Close() + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ + Username: cfg.ClickHouse.WriterDatabase.Username, + Password: cfg.ClickHouse.WriterDatabase.Password, + Name: cfg.ClickHouse.WriterDatabase.Name, + Host: cfg.ClickHouse.WriterDatabase.Host, + Port: cfg.ClickHouse.WriterDatabase.Port, + MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, + SSL: true, + MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, + }, &types.DatabaseConfig{ + Username: cfg.ClickHouse.ReaderDatabase.Username, + Password: cfg.ClickHouse.ReaderDatabase.Password, + 
Name: cfg.ClickHouse.ReaderDatabase.Name, + Host: cfg.ClickHouse.ReaderDatabase.Host, + Port: cfg.ClickHouse.ReaderDatabase.Port, + MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, + SSL: true, + MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, + }, "clickhouse", "clickhouse") + defer db.ClickHouseReader.Close() + defer db.ClickHouseWriter.Close() // Initialize the persistent redis client if requires.Redis { @@ -564,6 +564,40 @@ func collectNotifications(startEpoch uint64) error { if len(notifications[0]) > 0 { spew.Dump(notifications[0]) } + + emails, err := notification.RenderEmailsForUserEvents(0, notifications) + if err != nil { + return err + } + + for _, email := range emails { + // if email.Address == "" { + log.Infof("to: %v", email.Address) + log.Infof("subject: %v", email.Subject) + log.Infof("body: %v", email.Email.Body) + log.Info("-----") + // } + } + + // pushMessages, err := notification.RenderPushMessagesForUserEvents(0, notifications) + // if err != nil { + // return err + // } + + // for _, pushMessage := range pushMessages { + // message := pushMessage.Messages[0] + // log.Infof("title: %v body: %v", message.Notification.Title, message.Notification.Body) + + // if message.Token == "" { + // log.Info("sending test message") + + // err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, false) + // if err != nil { + // log.Error(err, "error sending firebase batch job", 0) + // } + // } + // } + return nil } @@ -606,7 +640,7 @@ func collectUserDbNotifications(startEpoch uint64) error { if message.Token == "" { log.Info("sending test message") - err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, false) + err = notification.SendPushBatch(pushMessage.UserId, []*messaging.Message{message}, true) if err != nil { log.Error(err, "error sending firebase batch job", 0) } diff --git a/backend/cmd/notification_collector/main.go b/backend/cmd/notification_collector/main.go index 
d6556eaa7..feeecf98b 100644 --- a/backend/cmd/notification_collector/main.go +++ b/backend/cmd/notification_collector/main.go @@ -150,6 +150,31 @@ func Run() { }, "pgx", "postgres") }() + wg.Add(1) + go func() { + defer wg.Done() + // clickhouse + db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ + Username: cfg.ClickHouse.WriterDatabase.Username, + Password: cfg.ClickHouse.WriterDatabase.Password, + Name: cfg.ClickHouse.WriterDatabase.Name, + Host: cfg.ClickHouse.WriterDatabase.Host, + Port: cfg.ClickHouse.WriterDatabase.Port, + MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, + SSL: true, + MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, + }, &types.DatabaseConfig{ + Username: cfg.ClickHouse.ReaderDatabase.Username, + Password: cfg.ClickHouse.ReaderDatabase.Password, + Name: cfg.ClickHouse.ReaderDatabase.Name, + Host: cfg.ClickHouse.ReaderDatabase.Host, + Port: cfg.ClickHouse.ReaderDatabase.Port, + MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, + SSL: true, + MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, + }, "clickhouse", "clickhouse") + }() + wg.Add(1) go func() { defer wg.Done() @@ -184,6 +209,8 @@ func Run() { defer db.FrontendWriterDB.Close() defer db.AlloyReader.Close() defer db.AlloyWriter.Close() + defer db.ClickHouseReader.Close() + defer db.ClickHouseWriter.Close() defer db.BigtableClient.Close() log.Infof("database connection established") diff --git a/backend/cmd/typescript_converter/main.go b/backend/cmd/typescript_converter/main.go index 8c4e81433..4b06729b3 100644 --- a/backend/cmd/typescript_converter/main.go +++ b/backend/cmd/typescript_converter/main.go @@ -24,7 +24,7 @@ const ( ) // Files that should not be converted to TypeScript -var ignoredFiles = []string{"data_access", "search_types", "archiver"} +var ignoredFiles = []string{"data_access", "search_types", "archiver", "rocketpool"} var typeMappings = map[string]string{ "decimal.Decimal": "string /* decimal.Decimal */", 
diff --git a/backend/go.mod b/backend/go.mod index c9eda313c..ff2204794 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -64,7 +64,7 @@ require ( github.com/prysmaticlabs/go-ssz v0.0.0-20210121151755-f6208871c388 github.com/rocket-pool/rocketpool-go v1.8.3-0.20240618173422-783b8668f5b4 github.com/rocket-pool/smartnode v1.13.6 - github.com/shopspring/decimal v1.3.1 + github.com/shopspring/decimal v1.4.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d diff --git a/backend/go.sum b/backend/go.sum index 0ab17372b..481c0f1c4 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -873,6 +873,8 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= diff --git a/backend/pkg/api/auth.go b/backend/pkg/api/auth.go index dc8a62814..13c4d860d 100644 --- a/backend/pkg/api/auth.go +++ b/backend/pkg/api/auth.go @@ -13,8 +13,10 @@ import ( "github.com/gorilla/csrf" ) +var day time.Duration = time.Hour * 24 +var sessionDuration time.Duration = day * 365 + func newSessionManager(cfg *types.Config) *scs.SessionManager { - // TODO: replace redis with user db down the line (or replace sessions with oauth2) pool := &redis.Pool{ MaxIdle: 10, Dial: func() 
(redis.Conn, error) { @@ -23,7 +25,7 @@ func newSessionManager(cfg *types.Config) *scs.SessionManager { } scs := scs.New() - scs.Lifetime = time.Hour * 24 * 7 + scs.Lifetime = sessionDuration scs.Cookie.Name = "session_id" scs.Cookie.HttpOnly = true scs.Cookie.Persist = true @@ -42,6 +44,19 @@ func newSessionManager(cfg *types.Config) *scs.SessionManager { return scs } +// returns a middleware that extends the session expiration if the session is older than 1 day +func getSlidingSessionExpirationMiddleware(scs *scs.SessionManager) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + deadline := scs.Deadline(r.Context()) // unauthenticated requests have deadline set to now+sessionDuration + if time.Until(deadline) < sessionDuration-day { + scs.SetDeadline(r.Context(), time.Now().Add(sessionDuration).UTC()) // setting to utc because library also does that internally + } + next.ServeHTTP(w, r) + }) + } +} + // returns goriila/csrf middleware with the given config settings func getCsrfProtectionMiddleware(cfg *types.Config) func(http.Handler) http.Handler { csrfBytes, err := hex.DecodeString(cfg.Frontend.CsrfAuthKey) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index e84e93da8..0ec429593 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -10,10 +10,11 @@ import ( "time" "github.com/go-faker/faker/v4" + "github.com/go-faker/faker/v4/pkg/interfaces" "github.com/go-faker/faker/v4/pkg/options" "github.com/gobitfly/beaconchain/pkg/api/enums" - "github.com/gobitfly/beaconchain/pkg/api/types" t "github.com/gobitfly/beaconchain/pkg/api/types" + commontypes "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/userservice" "github.com/shopspring/decimal" ) @@ -53,8 +54,7 @@ func randomEthDecimal() decimal.Decimal { // must pass a pointer to the data func 
commonFakeData(a interface{}) error { - // TODO fake decimal.Decimal - return faker.FakeData(a, options.WithRandomMapAndSliceMaxSize(5)) + return faker.FakeData(a, options.WithRandomMapAndSliceMaxSize(5), options.WithRandomFloatBoundaries(interfaces.RandomFloatBoundary{Start: 0, End: 1})) } func (d *DummyService) StartDataAccessServices() { @@ -387,33 +387,32 @@ func (d *DummyService) GetValidatorDashboardTotalRocketPool(ctx context.Context, return getDummyStruct[t.VDBRocketPoolTableRow]() } -func (d *DummyService) GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) { - return getDummyStruct[t.VDBNodeRocketPoolData]() -} - func (d *DummyService) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node string, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) { return getDummyWithPaging[t.VDBRocketPoolMinipoolsTableRow]() } func (d *DummyService) GetAllNetworks() ([]t.NetworkInfo, error) { - return []types.NetworkInfo{ + return []t.NetworkInfo{ { - ChainId: 1, - Name: "ethereum", + ChainId: 1, + Name: "ethereum", + NotificationsName: "mainnet", }, { - ChainId: 100, - Name: "gnosis", + ChainId: 100, + Name: "gnosis", + NotificationsName: "gnosis", }, { - ChainId: 17000, - Name: "holesky", + ChainId: 17000, + Name: "holesky", + NotificationsName: "holesky", }, }, nil } -func (d *DummyService) GetAllClients() ([]types.ClientInfo, error) { - return []types.ClientInfo{ +func (d *DummyService) GetAllClients() ([]t.ClientInfo, error) { + return []t.ClientInfo{ // execution_layer { Id: 0, @@ -576,10 +575,10 @@ func (d *DummyService) UpdateNotificationSettingsGeneral(ctx context.Context, us func (d *DummyService) UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error { return nil } -func (d 
*DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { +func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId uint64, name string, IsNotificationsEnabled bool) error { return nil } -func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { +func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId uint64) error { return nil } @@ -756,19 +755,37 @@ func (d *DummyService) GetValidatorDashboardMobileWidget(ctx context.Context, da return getDummyStruct[t.MobileWidgetData]() } -func (d *DummyService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit uint64, offset uint64) (*types.MachineMetricsData, error) { - data, err := getDummyStruct[types.MachineMetricsData]() +func (d *DummyService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit int, offset int) (*t.MachineMetricsData, error) { + data, err := getDummyStruct[t.MachineMetricsData]() if err != nil { return nil, err } - data.SystemMetrics = slices.SortedFunc(slices.Values(data.SystemMetrics), func(i, j *t.MachineMetricSystem) int { + data.SystemMetrics = slices.SortedFunc(slices.Values(data.SystemMetrics), func(i, j *commontypes.MachineMetricSystem) int { return int(i.Timestamp) - int(j.Timestamp) }) - data.ValidatorMetrics = slices.SortedFunc(slices.Values(data.ValidatorMetrics), func(i, j *t.MachineMetricValidator) int { + data.ValidatorMetrics = slices.SortedFunc(slices.Values(data.ValidatorMetrics), func(i, j *commontypes.MachineMetricValidator) int { return int(i.Timestamp) - int(j.Timestamp) }) - data.NodeMetrics = slices.SortedFunc(slices.Values(data.NodeMetrics), func(i, j *t.MachineMetricNode) int { + data.NodeMetrics = slices.SortedFunc(slices.Values(data.NodeMetrics), func(i, j 
*commontypes.MachineMetricNode) int { return int(i.Timestamp) - int(j.Timestamp) }) return data, nil } + +func (d *DummyService) PostUserMachineMetrics(ctx context.Context, userID uint64, machine, process string, data []byte) error { + return nil +} + +func (d *DummyService) GetValidatorDashboardMobileValidators(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBMobileValidatorsColumn], search string, limit uint64) ([]t.MobileValidatorDashboardValidatorsTableRow, *t.Paging, error) { + return getDummyWithPaging[t.MobileValidatorDashboardValidatorsTableRow]() +} + +func (d *DummyService) QueueTestEmailNotification(ctx context.Context, userId uint64) error { + return nil +} +func (d *DummyService) QueueTestPushNotification(ctx context.Context, userId uint64) error { + return nil +} +func (d *DummyService) QueueTestWebhookNotification(ctx context.Context, userId uint64, webhookUrl string, isDiscordWebhook bool) error { + return nil +} diff --git a/backend/pkg/api/data_access/header.go b/backend/pkg/api/data_access/header.go index 5658c9157..1a91ad832 100644 --- a/backend/pkg/api/data_access/header.go +++ b/backend/pkg/api/data_access/header.go @@ -1,9 +1,15 @@ package dataaccess import ( + "context" + "database/sql" + "fmt" + t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/price" + "github.com/gobitfly/beaconchain/pkg/commons/utils" ) func (d *DataAccessService) GetLatestSlot() (uint64, error) { @@ -26,6 +32,43 @@ func (d *DataAccessService) GetBlockHeightAt(slot uint64) (uint64, error) { return d.dummy.GetBlockHeightAt(slot) } +// returns the block number of the latest existing block at or before the given slot +func (d *DataAccessService) GetLatestBlockHeightForSlot(ctx context.Context, slot uint64) (uint64, error) { + query := `SELECT MAX(exec_block_number) FROM 
blocks WHERE slot <= $1` + res := uint64(0) + err := d.alloyReader.GetContext(ctx, &res, query, slot) + if err != nil { + if err == sql.ErrNoRows { + log.Warnf("no EL block found at or before slot %d", slot) + return 0, nil + } + return 0, fmt.Errorf("failed to get latest existing block height at or before slot %d: %w", slot, err) + } + return res, nil +} + +func (d *DataAccessService) GetLatestBlockHeightsForEpoch(ctx context.Context, epoch uint64) ([]uint64, error) { + // use 2 epochs as safety margin + query := ` + WITH recent_blocks AS ( + SELECT slot, exec_block_number + FROM blocks + WHERE slot < $1 + ORDER BY slot DESC + LIMIT $2 * 2 + ) + SELECT MAX(exec_block_number) OVER (ORDER BY slot) AS block + FROM recent_blocks + ORDER BY slot DESC + LIMIT $2` + res := []uint64{} + err := d.alloyReader.SelectContext(ctx, &res, query, (epoch+1)*utils.Config.Chain.ClConfig.SlotsPerEpoch, utils.Config.Chain.ClConfig.SlotsPerEpoch) + if err != nil { + return nil, fmt.Errorf("failed to get latest existing block heights for slots in epoch %d: %w", epoch, err) + } + return res, nil +} + func (d *DataAccessService) GetLatestExchangeRates() ([]t.EthConversionRate, error) { result := []t.EthConversionRate{} diff --git a/backend/pkg/api/data_access/machine_metrics.go b/backend/pkg/api/data_access/machine_metrics.go index 3b9935fcf..a8c3ca518 100644 --- a/backend/pkg/api/data_access/machine_metrics.go +++ b/backend/pkg/api/data_access/machine_metrics.go @@ -2,14 +2,57 @@ package dataaccess import ( "context" + "strings" - "github.com/gobitfly/beaconchain/pkg/api/types" + apiTypes "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" ) type MachineRepository interface { - GetUserMachineMetrics(context context.Context, userID uint64, limit uint64, offset uint64) (*types.MachineMetricsData, error) + GetUserMachineMetrics(context 
context.Context, userID uint64, limit int, offset int) (*apiTypes.MachineMetricsData, error) + PostUserMachineMetrics(context context.Context, userID uint64, machine, process string, data []byte) error } -func (d *DataAccessService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit uint64, offset uint64) (*types.MachineMetricsData, error) { - return d.dummy.GetUserMachineMetrics(ctx, userID, limit, offset) +func (d *DataAccessService) GetUserMachineMetrics(ctx context.Context, userID uint64, limit int, offset int) (*apiTypes.MachineMetricsData, error) { + data := &apiTypes.MachineMetricsData{} + + g := errgroup.Group{} + + g.Go(func() error { + var err error + data.SystemMetrics, err = d.bigtable.GetMachineMetricsSystem(types.UserId(userID), limit, offset) + return err + }) + + g.Go(func() error { + var err error + data.ValidatorMetrics, err = d.bigtable.GetMachineMetricsValidator(types.UserId(userID), limit, offset) + return err + }) + + g.Go(func() error { + var err error + data.NodeMetrics, err = d.bigtable.GetMachineMetricsNode(types.UserId(userID), limit, offset) + return err + }) + + if err := g.Wait(); err != nil { + return nil, errors.Wrap(err, "could not get stats") + } + + return data, nil +} + +func (d *DataAccessService) PostUserMachineMetrics(ctx context.Context, userID uint64, machine, process string, data []byte) error { + err := db.BigtableClient.SaveMachineMetric(process, types.UserId(userID), machine, data) + if err != nil { + if strings.HasPrefix(err.Error(), "rate limit") { + return err + } + return errors.Wrap(err, "could not save stats") + } + return nil } diff --git a/backend/pkg/api/data_access/app.go b/backend/pkg/api/data_access/mobile.go similarity index 94% rename from backend/pkg/api/data_access/app.go rename to backend/pkg/api/data_access/mobile.go index e2c09c268..dc526746b 100644 --- a/backend/pkg/api/data_access/app.go +++ b/backend/pkg/api/data_access/mobile.go @@ -27,6 +27,7 @@ type AppRepository interface { 
AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error + GetValidatorDashboardMobileValidators(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBMobileValidatorsColumn], search string, limit uint64) ([]t.MobileValidatorDashboardValidatorsTableRow, *t.Paging, error) } // GetUserIdByRefreshToken basically used to confirm the claimed user id with the refresh token. Returns the userId if successful @@ -170,7 +171,7 @@ func (d *DataAccessService) GetValidatorDashboardMobileWidget(ctx context.Contex if err != nil { return nil, fmt.Errorf("error retrieving validator dashboard overview data: %w", err) } - data.NetworkEfficiency = d.calculateTotalEfficiency( + data.NetworkEfficiency = utils.CalculateTotalEfficiency( efficiency.AttestationEfficiency[enums.AllTime], efficiency.ProposalEfficiency[enums.AllTime], efficiency.SyncEfficiency[enums.AllTime]) // Validator status @@ -326,7 +327,7 @@ func (d *DataAccessService) GetValidatorDashboardMobileWidget(ctx context.Contex syncEfficiency.Float64 = float64(queryResult.SyncExecuted) / float64(queryResult.SyncScheduled) syncEfficiency.Valid = true } - *efficiency = d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + *efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) return nil }) @@ -361,3 +362,7 @@ func (d *DataAccessService) internal_rp_network_stats() (*t.RPNetworkStats, erro `) return &networkStats, err } + +func (d *DataAccessService) GetValidatorDashboardMobileValidators(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBMobileValidatorsColumn], 
search string, limit uint64) ([]t.MobileValidatorDashboardValidatorsTableRow, *t.Paging, error) { + return d.dummy.GetValidatorDashboardMobileValidators(ctx, dashboardId, period, cursor, colSort, search, limit) +} diff --git a/backend/pkg/api/data_access/networks.go b/backend/pkg/api/data_access/networks.go index df95fb008..30c0669ec 100644 --- a/backend/pkg/api/data_access/networks.go +++ b/backend/pkg/api/data_access/networks.go @@ -12,16 +12,19 @@ func (d *DataAccessService) GetAllNetworks() ([]types.NetworkInfo, error) { return []types.NetworkInfo{ { - ChainId: 1, - Name: "ethereum", + ChainId: 1, + Name: "ethereum", + NotificationsName: "mainnet", }, { - ChainId: 100, - Name: "gnosis", + ChainId: 100, + Name: "gnosis", + NotificationsName: "gnosis", }, { - ChainId: 17000, - Name: "holesky", + ChainId: 17000, + Name: "holesky", + NotificationsName: "holesky", }, }, nil } diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 776308329..868efb171 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -8,7 +8,6 @@ import ( "encoding/gob" "fmt" "io" - "maps" "regexp" "slices" "sort" @@ -19,6 +18,7 @@ import ( "github.com/doug-martin/goqu/v9" "github.com/doug-martin/goqu/v9/exp" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/params" "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/pkg/api/enums" @@ -27,7 +27,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/gobitfly/beaconchain/pkg/notification" + n "github.com/gobitfly/beaconchain/pkg/notification" "github.com/lib/pq" "github.com/shopspring/decimal" "golang.org/x/sync/errgroup" @@ -50,28 +50,35 @@ type NotificationsRepository interface { GetNotificationSettingsDefaultValues(ctx context.Context) 
(*t.NotificationSettingsDefaultValues, error) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error - UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error - DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error + UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId uint64, name string, IsNotificationsEnabled bool) error + DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId uint64) error UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error UpdateNotificationSettingsAccountDashboard(ctx context.Context, userId uint64, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error + + QueueTestEmailNotification(ctx context.Context, userId uint64) error + QueueTestPushNotification(ctx context.Context, userId uint64) error + QueueTestWebhookNotification(ctx context.Context, userId uint64, webhookUrl string, isDiscordWebhook bool) error } func (*DataAccessService) registerNotificationInterfaceTypes() { var once sync.Once once.Do(func() { - gob.Register(¬ification.ValidatorProposalNotification{}) - 
gob.Register(¬ification.ValidatorAttestationNotification{}) - gob.Register(¬ification.ValidatorIsOfflineNotification{}) - gob.Register(¬ification.ValidatorGotSlashedNotification{}) - gob.Register(¬ification.ValidatorWithdrawalNotification{}) - gob.Register(¬ification.NetworkNotification{}) - gob.Register(¬ification.RocketpoolNotification{}) - gob.Register(¬ification.MonitorMachineNotification{}) - gob.Register(¬ification.TaxReportNotification{}) - gob.Register(¬ification.EthClientNotification{}) - gob.Register(¬ification.SyncCommitteeSoonNotification{}) + gob.Register(&n.ValidatorProposalNotification{}) + gob.Register(&n.ValidatorUpcomingProposalNotification{}) + gob.Register(&n.ValidatorGroupEfficiencyNotification{}) + gob.Register(&n.ValidatorAttestationNotification{}) + gob.Register(&n.ValidatorIsOfflineNotification{}) + gob.Register(&n.ValidatorIsOnlineNotification{}) + gob.Register(&n.ValidatorGotSlashedNotification{}) + gob.Register(&n.ValidatorWithdrawalNotification{}) + gob.Register(&n.NetworkNotification{}) + gob.Register(&n.RocketpoolNotification{}) + gob.Register(&n.MonitorMachineNotification{}) + gob.Register(&n.TaxReportNotification{}) + gob.Register(&n.EthClientNotification{}) + gob.Register(&n.SyncCommitteeSoonNotification{}) }) } @@ -81,7 +88,7 @@ const ( DiscordWebhookFormat string = "discord" - GroupOfflineThresholdDefault float64 = 0.1 + GroupEfficiencyBelowThresholdDefault float64 = 0.95 MaxCollateralThresholdDefault float64 = 1.0 MinCollateralThresholdDefault float64 = 0.2 ERC20TokenTransfersValueThresholdDefault float64 = 0.1 @@ -221,7 +228,7 @@ func (d *DataAccessService) GetNotificationOverview(ctx context.Context, userId if len(whereNetwork) > 0 { whereNetwork += " OR " } - whereNetwork += "event_name like '" + network.Name + ":rocketpool_%' OR event_name like '" + network.Name + ":network_%'" + whereNetwork += "event_name like '" + network.NotificationsName + ":rocketpool_%' OR event_name like '" + network.NotificationsName + ":network_%'" 
} query := goqu.Dialect("postgres"). @@ -243,7 +250,7 @@ func (d *DataAccessService) GetNotificationOverview(ctx context.Context, userId return err } - err = d.alloyReader.GetContext(ctx, &response, querySql, args...) + err = d.userReader.GetContext(ctx, &response, querySql, args...) return err }) @@ -284,7 +291,6 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI )). Where( goqu.Ex{"uvd.user_id": userId}, - goqu.L("uvd.network = ANY(?)", pq.Array(chainIds)), ). GroupBy( goqu.I("uvdnh.epoch"), @@ -294,6 +300,12 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI goqu.I("uvdg.name"), ) + if chainIds != nil { + vdbQuery = vdbQuery.Where( + goqu.L("uvd.network = ANY(?)", pq.Array(chainIds)), + ) + } + // TODO account dashboards /*adbQuery := goqu.Dialect("postgres"). From(goqu.T("adb_notifications_history").As("anh")). @@ -387,18 +399,15 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64, search string) (*t.NotificationValidatorDashboardDetail, error) { notificationDetails := t.NotificationValidatorDashboardDetail{ ValidatorOffline: []uint64{}, - GroupOffline: []t.NotificationEventGroup{}, - ProposalMissed: []t.IndexBlocks{}, + ProposalMissed: []t.IndexSlots{}, ProposalDone: []t.IndexBlocks{}, - UpcomingProposals: []t.IndexBlocks{}, + UpcomingProposals: []t.IndexSlots{}, Slashed: []uint64{}, SyncCommittee: []uint64{}, AttestationMissed: []t.IndexEpoch{}, - Withdrawal: []t.IndexBlocks{}, + Withdrawal: []t.NotificationEventWithdrawal{}, ValidatorOfflineReminder: []uint64{}, - GroupOfflineReminder: []t.NotificationEventGroup{}, ValidatorBackOnline: []t.NotificationEventValidatorBackOnline{}, - GroupBackOnline: []t.NotificationEventGroupBackOnline{}, MinimumCollateralReached: []t.Address{}, MaximumCollateralReached: []t.Address{}, 
} @@ -419,38 +428,45 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context searchIndexSet[searchIndex] = true } - result := []byte{} - query := `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` - err := d.alloyReader.GetContext(ctx, &result, query, dashboardId, groupId, epoch) + // ------------------------------------- + // dashboard and group name + query := `SELECT + uvd.name AS dashboard_name, + uvdg.name AS group_name + FROM + users_val_dashboards uvd + INNER JOIN + users_val_dashboards_groups uvdg ON uvdg.dashboard_id = uvd.id + WHERE uvd.id = $1 AND uvdg.id = $2` + err := d.alloyReader.GetContext(ctx, ¬ificationDetails, query, dashboardId, groupId) if err != nil { if err == sql.ErrNoRows { return ¬ificationDetails, nil } return nil, err } - if len(result) == 0 { - return ¬ificationDetails, nil + if notificationDetails.GroupName == "" { + notificationDetails.GroupName = t.DefaultGroupName } - - buf := bytes.NewBuffer(result) - gz, err := gzip.NewReader(buf) - if err != nil { - return nil, err + if notificationDetails.DashboardName == "" { + notificationDetails.DashboardName = t.DefaultDashboardName } - defer gz.Close() - // might need to loop if we get memory issues with large dashboards and can't ReadAll - decompressedData, err := io.ReadAll(gz) + // ------------------------------------- + // retrieve notification events + eventTypesEncodedList := [][]byte{} + query = `SELECT details FROM users_val_dashboards_notifications_history WHERE dashboard_id = $1 AND group_id = $2 AND epoch = $3` + err = d.alloyReader.SelectContext(ctx, &eventTypesEncodedList, query, dashboardId, groupId, epoch) if err != nil { return nil, err } + if len(eventTypesEncodedList) == 0 { + return ¬ificationDetails, nil + } - decoder := gob.NewDecoder(bytes.NewReader(decompressedData)) - - notifications := []types.Notification{} - err = decoder.Decode(¬ifications) + latestBlocks, err := 
d.GetLatestBlockHeightsForEpoch(ctx, epoch) if err != nil { - return nil, err + return nil, fmt.Errorf("error getting latest block height: %w", err) } type ProposalInfo struct { @@ -461,124 +477,160 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context proposalsInfo := make(map[t.VDBValidator]*ProposalInfo) addressMapping := make(map[string]*t.Address) - for _, not := range notifications { - switch not.GetEventName() { - case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : - // aggregate proposals - curNotification, ok := not.(*notification.ValidatorProposalNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { - proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} - } - prop := proposalsInfo[curNotification.ValidatorIndex] - switch curNotification.Status { - case 0: - prop.Scheduled = append(prop.Scheduled, curNotification.Slot) - case 1: - prop.Proposed = append(prop.Proposed, curNotification.Block) - case 2: - prop.Missed = append(prop.Missed, curNotification.Slot) - } - case types.ValidatorMissedAttestationEventName: - curNotification, ok := not.(*notification.ValidatorAttestationNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if curNotification.Status != 0 { - continue - } - notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) - case types.ValidatorGotSlashedEventName: - curNotification, ok := not.(*notification.ValidatorGotSlashedNotification) - if !ok 
{ - return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) - case types.ValidatorIsOfflineEventName: - curNotification, ok := not.(*notification.ValidatorIsOfflineNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - if curNotification.IsOffline { + contractStatusRequests := make([]db.ContractInteractionAtRequest, 0) + for _, eventTypesEncoded := range eventTypesEncodedList { + buf := bytes.NewBuffer(eventTypesEncoded) + gz, err := gzip.NewReader(buf) + if err != nil { + return nil, err + } + defer gz.Close() + + // might need to loop if we get memory issues + eventTypes, err := io.ReadAll(gz) + if err != nil { + return nil, err + } + + decoder := gob.NewDecoder(bytes.NewReader(eventTypes)) + + notifications := []types.Notification{} + err = decoder.Decode(¬ifications) + if err != nil { + return nil, err + } + + for _, notification := range notifications { + switch notification.GetEventName() { + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName /*, types.ValidatorScheduledProposalEventName*/ : + // aggregate proposals + curNotification, ok := notification.(*n.ValidatorProposalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorProposalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if _, ok := proposalsInfo[curNotification.ValidatorIndex]; !ok { + proposalsInfo[curNotification.ValidatorIndex] = &ProposalInfo{} + } + prop := proposalsInfo[curNotification.ValidatorIndex] + switch curNotification.Status { + case 0: + prop.Scheduled = append(prop.Scheduled, 
curNotification.Slot) + case 1: + prop.Proposed = append(prop.Proposed, curNotification.Block) + case 2: + prop.Missed = append(prop.Missed, curNotification.Slot) + } + case types.ValidatorMissedAttestationEventName: + curNotification, ok := notification.(*n.ValidatorAttestationNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorAttestationNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + if curNotification.Status != 0 { + continue + } + notificationDetails.AttestationMissed = append(notificationDetails.AttestationMissed, t.IndexEpoch{Index: curNotification.ValidatorIndex, Epoch: curNotification.Epoch}) + case types.ValidatorUpcomingProposalEventName: + curNotification, ok := notification.(*n.ValidatorUpcomingProposalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorUpcomingProposalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexSlots{Index: curNotification.ValidatorIndex, Slots: []uint64{curNotification.Slot}}) + case types.ValidatorGotSlashedEventName: + curNotification, ok := notification.(*n.ValidatorGotSlashedNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorGotSlashedNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.Slashed = append(notificationDetails.Slashed, curNotification.ValidatorIndex) + case types.ValidatorIsOfflineEventName: + curNotification, ok := notification.(*n.ValidatorIsOfflineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOfflineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } notificationDetails.ValidatorOffline = 
append(notificationDetails.ValidatorOffline, curNotification.ValidatorIndex) - } else { - // TODO EpochCount is not correct, missing / cumbersome to retrieve from backend - using "back online since" instead atm + // TODO not present in backend yet + //notificationDetails.ValidatorOfflineReminder = ... + case types.ValidatorIsOnlineEventName: + curNotification, ok := notification.(*n.ValidatorIsOnlineNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorIsOnlineNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } notificationDetails.ValidatorBackOnline = append(notificationDetails.ValidatorBackOnline, t.NotificationEventValidatorBackOnline{Index: curNotification.ValidatorIndex, EpochCount: curNotification.Epoch}) + case types.ValidatorReceivedWithdrawalEventName: + curNotification, ok := notification.(*n.ValidatorWithdrawalNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + // incorrect formatting TODO rework the Address and ContractInteractionAtRequest types to use clear string formatting (or prob go-ethereum common.Address) + contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", curNotification.Address), + Block: int64(latestBlocks[curNotification.Slot%utils.Config.Chain.ClConfig.SlotsPerEpoch]), + TxIdx: -1, + TraceIdx: -1, + }) + addr := t.Address{Hash: t.Hash(hexutil.Encode(curNotification.Address))} + addressMapping[hexutil.Encode(curNotification.Address)] = &addr + notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.NotificationEventWithdrawal{ + Index: curNotification.ValidatorIndex, + Amount: decimal.NewFromUint64(curNotification.Amount).Mul(decimal.NewFromFloat(params.GWei)), // Amounts have to be in WEI + Address: addr, + }) + 
case types.NetworkLivenessIncreasedEventName, + types.EthClientUpdateEventName, + types.MonitoringMachineOfflineEventName, + types.MonitoringMachineDiskAlmostFullEventName, + types.MonitoringMachineCpuLoadEventName, + types.MonitoringMachineMemoryUsageEventName, + types.TaxReportEventName: + // not vdb notifications, skip + case types.ValidatorDidSlashEventName: + case types.RocketpoolCommissionThresholdEventName, + types.RocketpoolNewClaimRoundStartedEventName: + // these could maybe returned later (?) + case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: + _, ok := notification.(*n.RocketpoolNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") + } + addr := t.Address{Hash: t.Hash(notification.GetEventFilter()), IsContract: true} + addressMapping[notification.GetEventFilter()] = &addr + if notification.GetEventName() == types.RocketpoolCollateralMinReachedEventName { + notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) + } else { + notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) + } + case types.SyncCommitteeSoonEventName: + curNotification, ok := notification.(*n.SyncCommitteeSoonNotification) + if !ok { + return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") + } + if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { + continue + } + notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) + default: + log.Debugf("Unhandled notification type: %s", notification.GetEventName()) } - // TODO not present in backend yet - //notificationDetails.ValidatorOfflineReminder = ... 
- case types.ValidatorGroupIsOfflineEventName: - // TODO type / collection not present yet, skipping - /*curNotification, ok := not.(*notification.validatorGroupIsOfflineNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to validatorGroupIsOfflineNotification") - } - if curNotification.Status == 0 { - notificationDetails.GroupOffline = ... - notificationDetails.GroupOfflineReminder = ... - } else { - notificationDetails.GroupBackOnline = ... - } - */ - case types.ValidatorReceivedWithdrawalEventName: - curNotification, ok := not.(*notification.ValidatorWithdrawalNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to ValidatorWithdrawalNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - // TODO might need to take care of automatic + exit withdrawal happening in the same epoch ? - notificationDetails.Withdrawal = append(notificationDetails.Withdrawal, t.IndexBlocks{Index: curNotification.ValidatorIndex, Blocks: []uint64{curNotification.Slot}}) - case types.NetworkLivenessIncreasedEventName, - types.EthClientUpdateEventName, - types.MonitoringMachineOfflineEventName, - types.MonitoringMachineDiskAlmostFullEventName, - types.MonitoringMachineCpuLoadEventName, - types.MonitoringMachineMemoryUsageEventName, - types.TaxReportEventName: - // not vdb notifications, skip - case types.ValidatorDidSlashEventName: - case types.RocketpoolCommissionThresholdEventName, - types.RocketpoolNewClaimRoundStartedEventName: - // these could maybe returned later (?) 
- case types.RocketpoolCollateralMinReachedEventName, types.RocketpoolCollateralMaxReachedEventName: - _, ok := not.(*notification.RocketpoolNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to RocketpoolNotification") - } - addr := t.Address{Hash: t.Hash(not.GetEventFilter()), IsContract: true} - addressMapping[not.GetEventFilter()] = &addr - if not.GetEventName() == types.RocketpoolCollateralMinReachedEventName { - notificationDetails.MinimumCollateralReached = append(notificationDetails.MinimumCollateralReached, addr) - } else { - notificationDetails.MaximumCollateralReached = append(notificationDetails.MaximumCollateralReached, addr) - } - case types.SyncCommitteeSoonEventName: - curNotification, ok := not.(*notification.SyncCommitteeSoonNotification) - if !ok { - return nil, fmt.Errorf("failed to cast notification to SyncCommitteeSoonNotification") - } - if searchEnabled && !searchIndexSet[curNotification.ValidatorIndex] { - continue - } - notificationDetails.SyncCommittee = append(notificationDetails.SyncCommittee, curNotification.ValidatorIndex) - default: - log.Debugf("Unhandled notification type: %s", not.GetEventName()) } } @@ -588,10 +640,10 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context notificationDetails.ProposalDone = append(notificationDetails.ProposalDone, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Proposed}) } if len(proposalInfo.Scheduled) > 0 { - notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Scheduled}) + notificationDetails.UpcomingProposals = append(notificationDetails.UpcomingProposals, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Scheduled}) } if len(proposalInfo.Missed) > 0 { - notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexBlocks{Index: validatorIndex, Blocks: proposalInfo.Missed}) + 
notificationDetails.ProposalMissed = append(notificationDetails.ProposalMissed, t.IndexSlots{Index: validatorIndex, Slots: proposalInfo.Missed}) } } @@ -599,6 +651,14 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { return nil, err } + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { + return nil, err + } + contractStatusPerAddress := make(map[string]int) + for i, contractStatus := range contractStatusRequests { + contractStatusPerAddress["0x"+contractStatus.Address] = i + } for i := range notificationDetails.MinimumCollateralReached { if address, ok := addressMapping[string(notificationDetails.MinimumCollateralReached[i].Hash)]; ok { notificationDetails.MinimumCollateralReached[i] = *address @@ -609,6 +669,13 @@ func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context notificationDetails.MaximumCollateralReached[i] = *address } } + for i := range notificationDetails.Withdrawal { + if address, ok := addressMapping[string(notificationDetails.Withdrawal[i].Address.Hash)]; ok { + notificationDetails.Withdrawal[i].Address = *address + } + contractStatus := contractStatuses[contractStatusPerAddress[string(notificationDetails.Withdrawal[i].Address.Hash)]] + notificationDetails.Withdrawal[i].Address.IsContract = contractStatus == types.CONTRACT_CREATION || contractStatus == types.CONTRACT_PRESENT + } return ¬ificationDetails, nil } @@ -997,9 +1064,9 @@ func (d *DataAccessService) GetRocketPoolNotifications(ctx context.Context, user // switch notification.EventType { // case types.RocketpoolNewClaimRoundStartedEventName: // resultEntry.EventType = "reward_round" - // case types.RocketpoolCollateralMinReached: + // case types.RocketpoolCollateralMinReachedEventName: // resultEntry.EventType = "collateral_min" - // case types.RocketpoolCollateralMaxReached: + // case 
types.RocketpoolCollateralMaxReachedEventName: // resultEntry.EventType = "collateral_max" // default: // return nil, nil, fmt.Errorf("invalid event name for rocketpool notification: %v", notification.EventType) @@ -1165,7 +1232,7 @@ func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId } networksSettings := make(map[string]*t.NotificationNetwork, len(networks)) for _, network := range networks { - networksSettings[network.Name] = &t.NotificationNetwork{ + networksSettings[network.NotificationsName] = &t.NotificationNetwork{ ChainId: network.ChainId, Settings: t.NotificationSettingsNetwork{ GasAboveThreshold: decimal.NewFromFloat(GasAboveThresholdDefault).Mul(decimal.NewFromInt(params.GWei)), @@ -1250,20 +1317,20 @@ func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId // ------------------------------------- // Get the paired devices pairedDevices := []struct { - DeviceIdentifier sql.NullString `db:"device_identifier"` - CreatedTs time.Time `db:"created_ts"` - DeviceName string `db:"device_name"` - NotifyEnabled bool `db:"notify_enabled"` + DeviceId uint64 `db:"id"` + CreatedTs time.Time `db:"created_ts"` + DeviceName string `db:"device_name"` + NotifyEnabled bool `db:"notify_enabled"` }{} wg.Go(func() error { err := d.userReader.SelectContext(ctx, &pairedDevices, ` SELECT - device_identifier, + id, created_ts, device_name, COALESCE(notify_enabled, false) AS notify_enabled FROM users_devices - WHERE user_id = $1 AND device_identifier IS NOT NULL`, userId) + WHERE user_id = $1`, userId) if err != nil { return fmt.Errorf(`error retrieving data for notifications paired devices: %w`, err) } @@ -1312,6 +1379,10 @@ func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId networkName := eventSplit[0] networkEvent := types.EventName(eventSplit[1]) + if _, ok := networksSettings[networkName]; !ok { + return nil, fmt.Errorf("network is not defined: %s", networkName) + } + switch networkEvent { case 
types.RocketpoolNewClaimRoundStartedEventName: networksSettings[networkName].Settings.IsNewRewardRoundSubscribed = true @@ -1354,7 +1425,7 @@ func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId for _, device := range pairedDevices { result.PairedDevices = append(result.PairedDevices, t.NotificationPairedDevice{ - Id: device.DeviceIdentifier.String, + Id: device.DeviceId, PairedTimestamp: device.CreatedTs.Unix(), Name: device.DeviceName, IsNotificationsEnabled: device.NotifyEnabled, @@ -1381,7 +1452,7 @@ func (d *DataAccessService) GetNotificationSettings(ctx context.Context, userId func (d *DataAccessService) GetNotificationSettingsDefaultValues(ctx context.Context) (*t.NotificationSettingsDefaultValues, error) { return &t.NotificationSettingsDefaultValues{ - GroupOfflineThreshold: GroupOfflineThresholdDefault, + GroupEfficiencyBelowThreshold: GroupEfficiencyBelowThresholdDefault, MaxCollateralThreshold: MaxCollateralThresholdDefault, MinCollateralThreshold: MinCollateralThresholdDefault, ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, @@ -1439,10 +1510,10 @@ func (d *DataAccessService) UpdateNotificationSettingsGeneral(ctx context.Contex // Collect the machine and rocketpool events to set and delete //Machine events - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineOfflineSubscribed, userId, string(types.MonitoringMachineOfflineEventName), "", epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineStorageUsageSubscribed, userId, string(types.MonitoringMachineDiskAlmostFullEventName), "", epoch, settings.MachineStorageUsageThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineCpuUsageSubscribed, userId, string(types.MonitoringMachineCpuLoadEventName), "", epoch, settings.MachineCpuUsageThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineMemoryUsageSubscribed, userId, 
string(types.MonitoringMachineMemoryUsageEventName), "", epoch, settings.MachineMemoryUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineOfflineSubscribed, userId, types.MonitoringMachineOfflineEventName, "", "", epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineStorageUsageSubscribed, userId, types.MonitoringMachineDiskAlmostFullEventName, "", "", epoch, settings.MachineStorageUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineCpuUsageSubscribed, userId, types.MonitoringMachineCpuLoadEventName, "", "", epoch, settings.MachineCpuUsageThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMachineMemoryUsageSubscribed, userId, types.MonitoringMachineMemoryUsageEventName, "", "", epoch, settings.MachineMemoryUsageThreshold) // Insert all the events or update the threshold if they already exist if len(eventsToInsert) > 0 { @@ -1500,7 +1571,7 @@ func (d *DataAccessService) UpdateNotificationSettingsNetworks(ctx context.Conte networkName := "" for _, network := range networks { if network.ChainId == chainId { - networkName = network.Name + networkName = network.NotificationsName break } } @@ -1517,14 +1588,10 @@ func (d *DataAccessService) UpdateNotificationSettingsNetworks(ctx context.Conte } defer utils.Rollback(tx) - eventName := fmt.Sprintf("%s:%s", networkName, types.NetworkGasAboveThresholdEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasAboveSubscribed, userId, eventName, "", epoch, settings.GasAboveThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) - eventName = fmt.Sprintf("%s:%s", networkName, types.NetworkGasBelowThresholdEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasBelowSubscribed, userId, eventName, "", epoch, settings.GasBelowThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) - eventName = fmt.Sprintf("%s:%s", networkName, 
types.NetworkParticipationRateThresholdEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsParticipationRateSubscribed, userId, eventName, "", epoch, settings.ParticipationRateThreshold) - eventName = fmt.Sprintf("%s:%s", networkName, types.RocketpoolNewClaimRoundStartedEventName) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsNewRewardRoundSubscribed, userId, eventName, "", epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasAboveSubscribed, userId, types.NetworkGasAboveThresholdEventName, networkName, "", epoch, settings.GasAboveThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGasBelowSubscribed, userId, types.NetworkGasBelowThresholdEventName, networkName, "", epoch, settings.GasBelowThreshold.Div(decimal.NewFromInt(params.GWei)).InexactFloat64()) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsParticipationRateSubscribed, userId, types.NetworkParticipationRateThresholdEventName, networkName, "", epoch, settings.ParticipationRateThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsNewRewardRoundSubscribed, userId, types.RocketpoolNewClaimRoundStartedEventName, networkName, "", epoch, 0) // Insert all the events or update the threshold if they already exist if len(eventsToInsert) > 0 { @@ -1571,13 +1638,13 @@ func (d *DataAccessService) UpdateNotificationSettingsNetworks(ctx context.Conte } return nil } -func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { +func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId uint64, name string, IsNotificationsEnabled bool) error { result, err := d.userWriter.ExecContext(ctx, ` UPDATE users_devices SET device_name = $1, notify_enabled = $2 - WHERE 
user_id = $3 AND device_identifier = $4`, + WHERE user_id = $3 AND id = $4`, name, IsNotificationsEnabled, userId, pairedDeviceId) if err != nil { return err @@ -1589,14 +1656,14 @@ func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.C return err } if rowsAffected == 0 { - return fmt.Errorf("device with id %s to update notification settings not found", pairedDeviceId) + return fmt.Errorf("device with id %v to update notification settings not found", pairedDeviceId) } return nil } -func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { +func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId uint64) error { result, err := d.userWriter.ExecContext(ctx, ` DELETE FROM users_devices - WHERE user_id = $1 AND device_identifier = $2`, + WHERE user_id = $1 AND id = $2`, userId, pairedDeviceId) if err != nil { return err @@ -1608,7 +1675,7 @@ func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.C return err } if rowsAffected == 0 { - return fmt.Errorf("device with id %s to delete not found", pairedDeviceId) + return fmt.Errorf("device with id %v to delete not found", pairedDeviceId) } return nil } @@ -1701,7 +1768,6 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex Network uint64 `db:"network"` WebhookUrl sql.NullString `db:"webhook_target"` IsWebhookDiscordEnabled sql.NullBool `db:"discord_webhook"` - IsRealTimeModeEnabled sql.NullBool `db:"realtime_notifications"` }{} wg.Go(func() error { err := d.alloyReader.SelectContext(ctx, &valDashboards, ` @@ -1712,8 +1778,7 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex g.name AS group_name, d.network, g.webhook_target, - (g.webhook_format = $1) AS discord_webhook, - g.realtime_notifications + (g.webhook_format = $1) AS discord_webhook FROM users_val_dashboards d INNER 
JOIN users_val_dashboards_groups g ON d.id = g.dashboard_id WHERE d.user_id = $2`, DiscordWebhookFormat, userId) @@ -1765,36 +1830,36 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex // ------------------------------------- // Evaluate the data - type NotificationSettingsDashboardsInfo struct { - IsAccountDashboard bool // if false it's a validator dashboard - DashboardId uint64 - DashboardName string - GroupId uint64 - GroupName string - // if it's a validator dashboard, Settings is NotificationSettingsAccountDashboard, otherwise NotificationSettingsValidatorDashboard - Settings interface{} - ChainIds []uint64 - } - settingsMap := make(map[string]*NotificationSettingsDashboardsInfo) + resultMap := make(map[string]*t.NotificationSettingsDashboardsTableRow) for _, event := range events { - eventSplit := strings.Split(event.Filter, ":") - if len(eventSplit) != 3 { + eventFilterSplit := strings.Split(event.Filter, ":") + if len(eventFilterSplit) != 3 { continue } - dashboardType := eventSplit[0] + dashboardType := eventFilterSplit[0] - if _, ok := settingsMap[event.Filter]; !ok { + eventNameSplit := strings.Split(string(event.Name), ":") + if len(eventNameSplit) != 2 && dashboardType == ValidatorDashboardEventPrefix { + return nil, nil, fmt.Errorf("invalid event name formatting for val dashboard notification: expected {network:event_name}, got %v", event.Name) + } + + eventName := event.Name + if len(eventNameSplit) == 2 { + eventName = types.EventName(eventNameSplit[1]) + } + + if _, ok := resultMap[event.Filter]; !ok { if dashboardType == ValidatorDashboardEventPrefix { - settingsMap[event.Filter] = &NotificationSettingsDashboardsInfo{ + resultMap[event.Filter] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsValidatorDashboard{ - GroupOfflineThreshold: GroupOfflineThresholdDefault, - MaxCollateralThreshold: MaxCollateralThresholdDefault, - MinCollateralThreshold: MinCollateralThresholdDefault, + 
GroupEfficiencyBelowThreshold: GroupEfficiencyBelowThresholdDefault, + MaxCollateralThreshold: MaxCollateralThresholdDefault, + MinCollateralThreshold: MinCollateralThresholdDefault, }, } } else if dashboardType == AccountDashboardEventPrefix { - settingsMap[event.Filter] = &NotificationSettingsDashboardsInfo{ + resultMap[event.Filter] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsAccountDashboard{ ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, }, @@ -1802,17 +1867,17 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex } } - switch settings := settingsMap[event.Filter].Settings.(type) { + switch settings := resultMap[event.Filter].Settings.(type) { case t.NotificationSettingsValidatorDashboard: - switch event.Name { + switch eventName { case types.ValidatorIsOfflineEventName: settings.IsValidatorOfflineSubscribed = true - case types.GroupIsOfflineEventName: - settings.IsGroupOfflineSubscribed = true - settings.GroupOfflineThreshold = event.Threshold + case types.ValidatorGroupEfficiencyEventName: + settings.IsGroupEfficiencyBelowSubscribed = true + settings.GroupEfficiencyBelowThreshold = event.Threshold case types.ValidatorMissedAttestationEventName: settings.IsAttestationsMissedSubscribed = true - case types.ValidatorProposalEventName: + case types.ValidatorMissedProposalEventName, types.ValidatorExecutedProposalEventName: settings.IsBlockProposalSubscribed = true case types.ValidatorUpcomingProposalEventName: settings.IsUpcomingBlockProposalSubscribed = true @@ -1822,16 +1887,16 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex settings.IsWithdrawalProcessedSubscribed = true case types.ValidatorGotSlashedEventName: settings.IsSlashedSubscribed = true - case types.RocketpoolCollateralMinReached: + case types.RocketpoolCollateralMinReachedEventName: settings.IsMinCollateralSubscribed = true settings.MinCollateralThreshold = event.Threshold 
- case types.RocketpoolCollateralMaxReached: + case types.RocketpoolCollateralMaxReachedEventName: settings.IsMaxCollateralSubscribed = true settings.MaxCollateralThreshold = event.Threshold } - settingsMap[event.Filter].Settings = settings + resultMap[event.Filter].Settings = settings case t.NotificationSettingsAccountDashboard: - switch event.Name { + switch eventName { case types.IncomingTransactionEventName: settings.IsIncomingTransactionsSubscribed = true case types.OutgoingTransactionEventName: @@ -1844,7 +1909,7 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex case types.ERC1155TokenTransferEventName: settings.IsERC1155TokenTransfersSubscribed = true } - settingsMap[event.Filter].Settings = settings + resultMap[event.Filter].Settings = settings } } @@ -1852,29 +1917,28 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex for _, valDashboard := range valDashboards { key := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, valDashboard.DashboardId, valDashboard.GroupId) - if _, ok := settingsMap[key]; !ok { - settingsMap[key] = &NotificationSettingsDashboardsInfo{ + if _, ok := resultMap[key]; !ok { + resultMap[key] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsValidatorDashboard{ - GroupOfflineThreshold: GroupOfflineThresholdDefault, - MaxCollateralThreshold: MaxCollateralThresholdDefault, - MinCollateralThreshold: MinCollateralThresholdDefault, + GroupEfficiencyBelowThreshold: GroupEfficiencyBelowThresholdDefault, + MaxCollateralThreshold: MaxCollateralThresholdDefault, + MinCollateralThreshold: MinCollateralThresholdDefault, }, } } // Set general info - settingsMap[key].IsAccountDashboard = false - settingsMap[key].DashboardId = valDashboard.DashboardId - settingsMap[key].DashboardName = valDashboard.DashboardName - settingsMap[key].GroupId = valDashboard.GroupId - settingsMap[key].GroupName = valDashboard.GroupName - settingsMap[key].ChainIds = 
[]uint64{valDashboard.Network} + resultMap[key].IsAccountDashboard = false + resultMap[key].DashboardId = valDashboard.DashboardId + resultMap[key].DashboardName = valDashboard.DashboardName + resultMap[key].GroupId = valDashboard.GroupId + resultMap[key].GroupName = valDashboard.GroupName + resultMap[key].ChainIds = []uint64{valDashboard.Network} // Set the settings - if valSettings, ok := settingsMap[key].Settings.(*t.NotificationSettingsValidatorDashboard); ok { + if valSettings, ok := resultMap[key].Settings.(*t.NotificationSettingsValidatorDashboard); ok { valSettings.WebhookUrl = valDashboard.WebhookUrl.String valSettings.IsWebhookDiscordEnabled = valDashboard.IsWebhookDiscordEnabled.Bool - valSettings.IsRealTimeModeEnabled = valDashboard.IsRealTimeModeEnabled.Bool } } @@ -1882,8 +1946,8 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex for _, accDashboard := range accDashboards { key := fmt.Sprintf("%s:%d:%d", AccountDashboardEventPrefix, accDashboard.DashboardId, accDashboard.GroupId) - if _, ok := settingsMap[key]; !ok { - settingsMap[key] = &NotificationSettingsDashboardsInfo{ + if _, ok := resultMap[key]; !ok { + resultMap[key] = &t.NotificationSettingsDashboardsTableRow{ Settings: t.NotificationSettingsAccountDashboard{ ERC20TokenTransfersValueThreshold: ERC20TokenTransfersValueThresholdDefault, }, @@ -1891,15 +1955,15 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex } // Set general info - settingsMap[key].IsAccountDashboard = true - settingsMap[key].DashboardId = accDashboard.DashboardId - settingsMap[key].DashboardName = accDashboard.DashboardName - settingsMap[key].GroupId = accDashboard.GroupId - settingsMap[key].GroupName = accDashboard.GroupName - settingsMap[key].ChainIds = accDashboard.SubscribedChainIds + resultMap[key].IsAccountDashboard = true + resultMap[key].DashboardId = accDashboard.DashboardId + resultMap[key].DashboardName = accDashboard.DashboardName + 
resultMap[key].GroupId = accDashboard.GroupId + resultMap[key].GroupName = accDashboard.GroupName + resultMap[key].ChainIds = accDashboard.SubscribedChainIds // Set the settings - if accSettings, ok := settingsMap[key].Settings.(*t.NotificationSettingsAccountDashboard); ok { + if accSettings, ok := resultMap[key].Settings.(*t.NotificationSettingsAccountDashboard); ok { accSettings.WebhookUrl = accDashboard.WebhookUrl.String accSettings.IsWebhookDiscordEnabled = accDashboard.IsWebhookDiscordEnabled.Bool accSettings.IsIgnoreSpamTransactionsEnabled = accDashboard.IsIgnoreSpamTransactionsEnabled @@ -1910,74 +1974,63 @@ func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Contex // Apply filter if search != "" { lowerSearch := strings.ToLower(search) - for key, setting := range settingsMap { - if !strings.HasPrefix(strings.ToLower(setting.DashboardName), lowerSearch) && - !strings.HasPrefix(strings.ToLower(setting.GroupName), lowerSearch) { - delete(settingsMap, key) + for key, resultEntry := range resultMap { + if !strings.HasPrefix(strings.ToLower(resultEntry.DashboardName), lowerSearch) && + !strings.HasPrefix(strings.ToLower(resultEntry.GroupName), lowerSearch) { + delete(resultMap, key) } } } // Convert to a slice for sorting and paging - settings := slices.Collect(maps.Values(settingsMap)) + for _, resultEntry := range resultMap { + result = append(result, *resultEntry) + } // ------------------------------------- // Sort // Each row is uniquely defined by the dashboardId, groupId, and isAccountDashboard so the sort order is DashboardName/GroupName => DashboardId => GroupId => IsAccountDashboard - var primarySortParam func(resultEntry *NotificationSettingsDashboardsInfo) string + var primarySortParam func(resultEntry t.NotificationSettingsDashboardsTableRow) string switch colSort.Column { case enums.NotificationSettingsDashboardColumns.DashboardName: - primarySortParam = func(resultEntry *NotificationSettingsDashboardsInfo) string { return 
resultEntry.DashboardName } + primarySortParam = func(resultEntry t.NotificationSettingsDashboardsTableRow) string { return resultEntry.DashboardName } case enums.NotificationSettingsDashboardColumns.GroupName: - primarySortParam = func(resultEntry *NotificationSettingsDashboardsInfo) string { return resultEntry.GroupName } + primarySortParam = func(resultEntry t.NotificationSettingsDashboardsTableRow) string { return resultEntry.GroupName } default: return nil, nil, fmt.Errorf("invalid sort column for notification subscriptions: %v", colSort.Column) } - sort.Slice(settings, func(i, j int) bool { + sort.Slice(result, func(i, j int) bool { if isReverseDirection { - if primarySortParam(settings[i]) == primarySortParam(settings[j]) { - if settings[i].DashboardId == settings[j].DashboardId { - if settings[i].GroupId == settings[j].GroupId { - return settings[i].IsAccountDashboard + if primarySortParam(result[i]) == primarySortParam(result[j]) { + if result[i].DashboardId == result[j].DashboardId { + if result[i].GroupId == result[j].GroupId { + return result[i].IsAccountDashboard } - return settings[i].GroupId > settings[j].GroupId + return result[i].GroupId > result[j].GroupId } - return settings[i].DashboardId > settings[j].DashboardId + return result[i].DashboardId > result[j].DashboardId } - return primarySortParam(settings[i]) > primarySortParam(settings[j]) + return primarySortParam(result[i]) > primarySortParam(result[j]) } else { - if primarySortParam(settings[i]) == primarySortParam(settings[j]) { - if settings[i].DashboardId == settings[j].DashboardId { - if settings[i].GroupId == settings[j].GroupId { - return settings[j].IsAccountDashboard + if primarySortParam(result[i]) == primarySortParam(result[j]) { + if result[i].DashboardId == result[j].DashboardId { + if result[i].GroupId == result[j].GroupId { + return result[j].IsAccountDashboard } - return settings[i].GroupId < settings[j].GroupId + return result[i].GroupId < result[j].GroupId } - return 
settings[i].DashboardId < settings[j].DashboardId + return result[i].DashboardId < result[j].DashboardId } - return primarySortParam(settings[i]) < primarySortParam(settings[j]) + return primarySortParam(result[i]) < primarySortParam(result[j]) } }) - // ------------------------------------- - // Convert to the final result format - for _, setting := range settings { - result = append(result, t.NotificationSettingsDashboardsTableRow{ - IsAccountDashboard: setting.IsAccountDashboard, - DashboardId: setting.DashboardId, - GroupId: setting.GroupId, - GroupName: setting.GroupName, - Settings: setting.Settings, - ChainIds: setting.ChainIds, - }) - } - // ------------------------------------- // Paging // Find the index for the cursor and limit the data if currentCursor.IsValid() { - for idx, row := range settings { + for idx, row := range result { if row.DashboardId == currentCursor.DashboardId && row.GroupId == currentCursor.GroupId && row.IsAccountDashboard == currentCursor.IsAccountDashboard { @@ -2017,6 +2070,30 @@ func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx con var eventsToInsert []goqu.Record var eventsToDelete []goqu.Expression + // Get the network for the validator dashboard + var chainId uint64 + err := d.alloyReader.GetContext(ctx, &chainId, `SELECT network FROM users_val_dashboards WHERE id = $1 AND user_id = $2`, dashboardId, userId) + if err != nil { + return fmt.Errorf("error getting network for validator dashboard: %w", err) + } + + networks, err := d.GetAllNetworks() + if err != nil { + return err + } + + networkName := "" + for _, network := range networks { + if network.ChainId == chainId { + networkName = network.NotificationsName + break + } + } + if networkName == "" { + return fmt.Errorf("network with chain id %d to update general notification settings not found", chainId) + } + + // Add and remove the events in users_subscriptions tx, err := d.userWriter.BeginTxx(ctx, nil) if err != nil { return fmt.Errorf("error 
starting db transactions to update validator dashboard notification settings: %w", err) @@ -2025,16 +2102,18 @@ func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx con eventFilter := fmt.Sprintf("%s:%d:%d", ValidatorDashboardEventPrefix, dashboardId, groupId) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsValidatorOfflineSubscribed, userId, string(types.ValidatorIsOfflineEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGroupOfflineSubscribed, userId, string(types.GroupIsOfflineEventName), eventFilter, epoch, settings.GroupOfflineThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsAttestationsMissedSubscribed, userId, string(types.ValidatorMissedAttestationEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, string(types.ValidatorProposalEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsUpcomingBlockProposalSubscribed, userId, string(types.ValidatorUpcomingProposalEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSyncSubscribed, userId, string(types.SyncCommitteeSoon), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsWithdrawalProcessedSubscribed, userId, string(types.ValidatorReceivedWithdrawalEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSlashedSubscribed, userId, string(types.ValidatorGotSlashedEventName), eventFilter, epoch, 0) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMaxCollateralSubscribed, userId, string(types.RocketpoolCollateralMaxReached), eventFilter, epoch, settings.MaxCollateralThreshold) - d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMinCollateralSubscribed, userId, string(types.RocketpoolCollateralMinReached), 
eventFilter, epoch, settings.MinCollateralThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsValidatorOfflineSubscribed, userId, types.ValidatorIsOfflineEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsGroupEfficiencyBelowSubscribed, userId, types.ValidatorGroupEfficiencyEventName, networkName, eventFilter, epoch, settings.GroupEfficiencyBelowThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsAttestationsMissedSubscribed, userId, types.ValidatorMissedAttestationEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsUpcomingBlockProposalSubscribed, userId, types.ValidatorUpcomingProposalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSyncSubscribed, userId, types.SyncCommitteeSoon, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsWithdrawalProcessedSubscribed, userId, types.ValidatorReceivedWithdrawalEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsSlashedSubscribed, userId, types.ValidatorGotSlashedEventName, networkName, eventFilter, epoch, 0) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMaxCollateralSubscribed, userId, types.RocketpoolCollateralMaxReachedEventName, networkName, eventFilter, epoch, settings.MaxCollateralThreshold) + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsMinCollateralSubscribed, userId, types.RocketpoolCollateralMinReachedEventName, networkName, eventFilter, epoch, settings.MinCollateralThreshold) + // Set two events for IsBlockProposalSubscribed + d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, types.ValidatorMissedProposalEventName, networkName, eventFilter, epoch, 0) + 
d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsBlockProposalSubscribed, userId, types.ValidatorExecutedProposalEventName, networkName, eventFilter, epoch, 0) // Insert all the events or update the threshold if they already exist if len(eventsToInsert) > 0 { @@ -2085,9 +2164,8 @@ func (d *DataAccessService) UpdateNotificationSettingsValidatorDashboard(ctx con UPDATE users_val_dashboards_groups SET webhook_target = NULLIF($1, ''), - webhook_format = CASE WHEN $2 THEN $3 ELSE NULL END, - realtime_notifications = CASE WHEN $4 THEN TRUE ELSE NULL END - WHERE dashboard_id = $5 AND id = $6`, settings.WebhookUrl, settings.IsWebhookDiscordEnabled, DiscordWebhookFormat, settings.IsRealTimeModeEnabled, dashboardId, groupId) + webhook_format = CASE WHEN $2 THEN $3 ELSE NULL END + WHERE dashboard_id = $4 AND id = $5`, settings.WebhookUrl, settings.IsWebhookDiscordEnabled, DiscordWebhookFormat, dashboardId, groupId) if err != nil { return err } @@ -2110,11 +2188,11 @@ func (d *DataAccessService) UpdateNotificationSettingsAccountDashboard(ctx conte // eventFilter := fmt.Sprintf("%s:%d:%d", AccountDashboardEventPrefix, dashboardId, groupId) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsIncomingTransactionsSubscribed, userId, string(types.IncomingTransactionEventName), eventFilter, epoch, 0) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsOutgoingTransactionsSubscribed, userId, string(types.OutgoingTransactionEventName), eventFilter, epoch, 0) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC20TokenTransfersSubscribed, userId, string(types.ERC20TokenTransferEventName), eventFilter, epoch, settings.ERC20TokenTransfersValueThreshold) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC721TokenTransfersSubscribed, userId, string(types.ERC721TokenTransferEventName), eventFilter, epoch, 0) - // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, 
settings.IsERC1155TokenTransfersSubscribed, userId, string(types.ERC1155TokenTransferEventName), eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsIncomingTransactionsSubscribed, userId, types.IncomingTransactionEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsOutgoingTransactionsSubscribed, userId, types.OutgoingTransactionEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC20TokenTransfersSubscribed, userId, types.ERC20TokenTransferEventName, "", eventFilter, epoch, settings.ERC20TokenTransfersValueThreshold) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC721TokenTransfersSubscribed, userId, types.ERC721TokenTransferEventName, "", eventFilter, epoch, 0) + // d.AddOrRemoveEvent(&eventsToInsert, &eventsToDelete, settings.IsERC1155TokenTransfersSubscribed, userId, types.ERC1155TokenTransferEventName, "", eventFilter, epoch, 0) // // Insert all the events or update the threshold if they already exist // if len(eventsToInsert) > 0 { @@ -2176,11 +2254,29 @@ func (d *DataAccessService) UpdateNotificationSettingsAccountDashboard(ctx conte return d.dummy.UpdateNotificationSettingsAccountDashboard(ctx, userId, dashboardId, groupId, settings) } -func (d *DataAccessService) AddOrRemoveEvent(eventsToInsert *[]goqu.Record, eventsToDelete *[]goqu.Expression, isSubscribed bool, userId uint64, eventName string, eventFilter string, epoch int64, threshold float64) { +func (d *DataAccessService) AddOrRemoveEvent(eventsToInsert *[]goqu.Record, eventsToDelete *[]goqu.Expression, isSubscribed bool, userId uint64, eventName types.EventName, network, eventFilter string, epoch int64, threshold float64) { + fullEventName := string(eventName) + if network != "" { + fullEventName = fmt.Sprintf("%s:%s", network, eventName) + } + if isSubscribed { - event := goqu.Record{"user_id": userId, "event_name": eventName, 
"event_filter": eventFilter, "created_ts": goqu.L("NOW()"), "created_epoch": epoch, "event_threshold": threshold} + event := goqu.Record{"user_id": userId, "event_name": fullEventName, "event_filter": eventFilter, "created_ts": goqu.L("NOW()"), "created_epoch": epoch, "event_threshold": threshold} *eventsToInsert = append(*eventsToInsert, event) } else { - *eventsToDelete = append(*eventsToDelete, goqu.Ex{"user_id": userId, "event_name": eventName, "event_filter": eventFilter}) + *eventsToDelete = append(*eventsToDelete, goqu.Ex{"user_id": userId, "event_name": fullEventName, "event_filter": eventFilter}) } } + +func (d *DataAccessService) QueueTestEmailNotification(ctx context.Context, userId uint64) error { + // TODO: @Data Access + return nil +} +func (d *DataAccessService) QueueTestPushNotification(ctx context.Context, userId uint64) error { + // TODO: @Data Access + return nil +} +func (d *DataAccessService) QueueTestWebhookNotification(ctx context.Context, userId uint64, webhookUrl string, isDiscordWebhook bool) error { + // TODO: @Data Access + return nil +} diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 906742ecf..62e689ed4 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -4,10 +4,10 @@ import ( "context" "database/sql" "fmt" - "math" "time" t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/pkg/errors" "golang.org/x/sync/errgroup" @@ -258,460 +258,16 @@ func (d *DataAccessService) GetUserIdByResetHash(ctx context.Context, hash strin return result, err } -var adminPerks = t.PremiumPerks{ - AdFree: false, // admins want to see ads to check ad configuration - ValidatorDashboards: maxJsInt, - ValidatorsPerDashboard: maxJsInt, - ValidatorGroupsPerDashboard: maxJsInt, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - 
ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: maxJsInt, - Hourly: maxJsInt, - Daily: maxJsInt, - Weekly: maxJsInt, - }, - EmailNotificationsPerDay: maxJsInt, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: maxJsInt, - WebhookEndpoints: maxJsInt, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: maxJsInt, - MachineMonitoringHistorySeconds: maxJsInt, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, -} - func (d *DataAccessService) GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { - // TODO @patrick post-beta improve and unmock - userInfo := &t.UserInfo{ - Id: userId, - ApiKeys: []string{}, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10, - ApiKeys: 4, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - Subscriptions: []t.UserSubscription{}, - } - - productSummary, err := d.GetProductSummary(ctx) - if err != nil { - return nil, fmt.Errorf("error getting productSummary: %w", err) - } - - result := struct { - Email string `db:"email"` - UserGroup string `db:"user_group"` - }{} - err = d.userReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) - if err != nil { - if errors.Is(err, sql.ErrNoRows) { - return nil, fmt.Errorf("%w: user not found", ErrNotFound) - } - return nil, err - } - userInfo.Email = result.Email - userInfo.UserGroup = result.UserGroup - - userInfo.Email = utils.CensorEmail(userInfo.Email) - - err = d.userReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) - if err != nil && err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) - } - - premiumProduct := struct { - ProductId string `db:"product_id"` - Store string `db:"store"` - 
Start time.Time `db:"start"` - End time.Time `db:"end"` - }{} - err = d.userReader.GetContext(ctx, &premiumProduct, ` - SELECT - COALESCE(uas.product_id, '') AS product_id, - COALESCE(uas.store, '') AS store, - COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, - COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end - FROM users_app_subscriptions uas - LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id - WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') - ORDER BY CASE uas.product_id - WHEN 'orca.yearly' THEN 1 - WHEN 'orca' THEN 2 - WHEN 'dolphin.yearly' THEN 3 - WHEN 'dolphin' THEN 4 - WHEN 'guppy.yearly' THEN 5 - WHEN 'guppy' THEN 6 - WHEN 'whale' THEN 7 - WHEN 'goldfish' THEN 8 - WHEN 'plankton' THEN 9 - ELSE 10 -- For any other product_id values - END, uas.id DESC - LIMIT 1`, userId) - if err != nil { - if err != sql.ErrNoRows { - return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) - } - premiumProduct.ProductId = "premium_free" - premiumProduct.Store = "" - } - - foundProduct := false - for _, p := range productSummary.PremiumProducts { - effectiveProductId := premiumProduct.ProductId - productName := p.ProductName - switch premiumProduct.ProductId { - case "whale": - effectiveProductId = "dolphin" - productName = "Whale" - case "goldfish": - effectiveProductId = "guppy" - productName = "Goldfish" - case "plankton": - effectiveProductId = "guppy" - productName = "Plankton" - } - if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { - userInfo.PremiumPerks = p.PremiumPerks - foundProduct = true - - store := t.ProductStoreStripe - switch premiumProduct.Store { - case "ios-appstore": - store = t.ProductStoreIosAppstore - case "android-playstore": - store = 
t.ProductStoreAndroidPlaystore - case "ethpool": - store = t.ProductStoreEthpool - case "manuall": - store = t.ProductStoreCustom - } - - if effectiveProductId != "premium_free" { - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: premiumProduct.ProductId, - ProductName: productName, - ProductCategory: t.ProductCategoryPremium, - ProductStore: store, - Start: premiumProduct.Start.Unix(), - End: premiumProduct.End.Unix(), - }) - } - break - } - } - if !foundProduct { - return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) - } - - premiumAddons := []struct { - PriceId string `db:"price_id"` - Start time.Time `db:"start"` - End time.Time `db:"end"` - Quantity int `db:"quantity"` - }{} - err = d.userReader.SelectContext(ctx, &premiumAddons, ` - SELECT - price_id, - to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, - to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, - COALESCE((uss.payload->>'quantity')::int,1) AS quantity - FROM users_stripe_subscriptions uss - INNER JOIN users u ON u.stripe_customer_id = uss.customer_id - WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) - if err != nil { - return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) - } - for _, addon := range premiumAddons { - foundAddon := false - for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { - if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { - foundAddon = true - for i := 0; i < addon.Quantity; i++ { - userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators - userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ - ProductId: utils.PriceIdToProductId(addon.PriceId), - ProductName: p.ProductName, - ProductCategory: t.ProductCategoryPremiumAddon, - ProductStore: t.ProductStoreStripe, - Start: addon.Start.Unix(), - End: addon.End.Unix(), - }) - } - 
} - } - if !foundAddon { - return nil, fmt.Errorf("addon not found: %v", addon.PriceId) - } - } - - if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { - userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit - } - - if userInfo.UserGroup == t.UserGroupAdmin { - userInfo.PremiumPerks = adminPerks - } - - return userInfo, nil -} - -const hour uint64 = 3600 -const day = 24 * hour -const week = 7 * day -const month = 30 * day -const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) - -var freeTierProduct t.PremiumProduct = t.PremiumProduct{ - ProductName: "Free", - PremiumPerks: t.PremiumPerks{ - AdFree: false, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 20, - ValidatorGroupsPerDashboard: 1, - ShareCustomDashboards: false, - ManageDashboardViaApi: false, - BulkAdding: false, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 0, - Hourly: 12 * hour, - Daily: 0, - Weekly: 0, - }, - EmailNotificationsPerDay: 5, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 1, - WebhookEndpoints: 1, - MobileAppCustomThemes: false, - MobileAppWidget: false, - MonitorMachines: 1, - MachineMonitoringHistorySeconds: 3600 * 3, - NotificationsMachineCustomThreshold: false, - NotificationsValidatorDashboardRealTimeMode: false, - NotificationsValidatorDashboardGroupOffline: false, - }, - PricePerMonthEur: 0, - PricePerYearEur: 0, - ProductIdMonthly: "premium_free", - ProductIdYearly: "premium_free.yearly", + return db.GetUserInfo(ctx, userId, d.userReader) } func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { - // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable - return &t.ProductSummary{ - ValidatorsPerDashboardLimit: 102_000, - StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, - ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet - { - ProductId: 
"api_free", - ProductName: "Free", - PricePerMonthEur: 0, - PricePerYearEur: 0 * 12, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 10, - UnitsPerMonth: 10_000_000, - ApiKeys: 2, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "iron", - ProductName: "Iron", - PricePerMonthEur: 1.99, - PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 20, - UnitsPerMonth: 20_000_000, - ApiKeys: 10, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "silver", - ProductName: "Silver", - PricePerMonthEur: 2.99, - PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 30, - UnitsPerMonth: 100_000_000, - ApiKeys: 20, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - { - ProductId: "gold", - ProductName: "Gold", - PricePerMonthEur: 3.99, - PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, - ApiPerks: t.ApiPerks{ - UnitsPerSecond: 40, - UnitsPerMonth: 200_000_000, - ApiKeys: 40, - ConsensusLayerAPI: true, - ExecutionLayerAPI: true, - Layer2API: true, - NoAds: true, - DiscordSupport: false, - }, - }, - }, - PremiumProducts: []t.PremiumProduct{ - freeTierProduct, - { - ProductName: "Guppy", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 1, - ValidatorsPerDashboard: 100, - ValidatorGroupsPerDashboard: 3, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: day, - Hourly: 7 * day, - Daily: month, - Weekly: 0, - }, - EmailNotificationsPerDay: 15, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 3, - WebhookEndpoints: 3, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 2, - MachineMonitoringHistorySeconds: 3600 * 
24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 9.99, - PricePerYearEur: 107.88, - ProductIdMonthly: "guppy", - ProductIdYearly: "guppy.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, - StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, - }, - { - ProductName: "Dolphin", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 300, - ValidatorGroupsPerDashboard: 10, - ShareCustomDashboards: true, - ManageDashboardViaApi: false, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 5 * day, - Hourly: month, - Daily: 2 * month, - Weekly: 8 * week, - }, - EmailNotificationsPerDay: 20, - ConfigureNotificationsViaApi: false, - ValidatorGroupNotifications: 10, - WebhookEndpoints: 10, - MobileAppCustomThemes: true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 29.99, - PricePerYearEur: 311.88, - ProductIdMonthly: "dolphin", - ProductIdYearly: "dolphin.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, - StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, - }, - { - ProductName: "Orca", - PremiumPerks: t.PremiumPerks{ - AdFree: true, - ValidatorDashboards: 2, - ValidatorsPerDashboard: 1000, - ValidatorGroupsPerDashboard: 30, - ShareCustomDashboards: true, - ManageDashboardViaApi: true, - BulkAdding: true, - ChartHistorySeconds: t.ChartHistorySeconds{ - Epoch: 3 * week, - Hourly: 6 * month, - Daily: 12 * month, - Weekly: maxJsInt, - }, - EmailNotificationsPerDay: 50, - ConfigureNotificationsViaApi: true, - ValidatorGroupNotifications: 60, - WebhookEndpoints: 30, - MobileAppCustomThemes: 
true, - MobileAppWidget: true, - MonitorMachines: 10, - MachineMonitoringHistorySeconds: 3600 * 24 * 30, - NotificationsMachineCustomThreshold: true, - NotificationsValidatorDashboardRealTimeMode: true, - NotificationsValidatorDashboardGroupOffline: true, - }, - PricePerMonthEur: 49.99, - PricePerYearEur: 479.88, - ProductIdMonthly: "orca", - ProductIdYearly: "orca.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, - StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, - IsPopular: true, - }, - }, - ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ - { - ProductName: "1k extra valis per dashboard", - ExtraDashboardValidators: 1000, - PricePerMonthEur: 74.99, - PricePerYearEur: 719.88, - ProductIdMonthly: "vdb_addon_1k", - ProductIdYearly: "vdb_addon_1k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, - }, - { - ProductName: "10k extra valis per dashboard", - ExtraDashboardValidators: 10000, - PricePerMonthEur: 449.99, - PricePerYearEur: 4319.88, - ProductIdMonthly: "vdb_addon_10k", - ProductIdYearly: "vdb_addon_10k.yearly", - StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, - StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, - }, - }, - }, nil + return db.GetProductSummary(ctx) } func (d *DataAccessService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - return &freeTierProduct.PremiumPerks, nil + return db.GetFreeTierPerks(ctx) } func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { diff --git a/backend/pkg/api/data_access/vdb.go b/backend/pkg/api/data_access/vdb.go index da907a294..d15a3eb1a 100644 --- a/backend/pkg/api/data_access/vdb.go +++ b/backend/pkg/api/data_access/vdb.go @@ -76,7 +76,6 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardRocketPool(ctx context.Context, 
dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBRocketPoolColumn], search string, limit uint64) ([]t.VDBRocketPoolTableRow, *t.Paging, error) GetValidatorDashboardTotalRocketPool(ctx context.Context, dashboardId t.VDBId, search string) (*t.VDBRocketPoolTableRow, error) - GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) diff --git a/backend/pkg/api/data_access/vdb_blocks.go b/backend/pkg/api/data_access/vdb_blocks.go index ca881c0fb..ca4a06f88 100644 --- a/backend/pkg/api/data_access/vdb_blocks.go +++ b/backend/pkg/api/data_access/vdb_blocks.go @@ -284,15 +284,8 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das ) ` - distinct := "" - if !onlyPrimarySort { - distinct = sortColName - } from := `past_blocks ` - selectStr := `SELECT * FROM ` + from - if len(distinct) > 0 { - selectStr = `SELECT DISTINCT ON (` + distinct + `) * FROM ` + from - } + selectStr := `SELECT * FROM ` query := selectStr + from + where + orderBy + limitStr // supply scheduled proposals, if any @@ -325,11 +318,11 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das `, len(params)-2) } cte += `) ` - if len(distinct) != 0 { - distinct += ", " + distinct := "slot" + if !onlyPrimarySort { + distinct = sortColName + ", " + distinct } // keep all ordering, sorting etc - distinct += "slot" selectStr = `SELECT DISTINCT ON (` + distinct + `) * FROM ` // encapsulate past blocks query to ensure performance from = `( @@ -388,11 +381,11 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, 
das } graffiti := proposal.GraffitiText data[i].Graffiti = &graffiti + block := uint64(proposal.Block.Int64) + data[i].Block = &block if proposal.Status == 3 { continue } - block := uint64(proposal.Block.Int64) - data[i].Block = &block var reward t.ClElValue[decimal.Decimal] if proposal.ElReward.Valid { rewardRecp := t.Address{ diff --git a/backend/pkg/api/data_access/vdb_helpers.go b/backend/pkg/api/data_access/vdb_helpers.go index 12bfe2c46..30fd72f61 100644 --- a/backend/pkg/api/data_access/vdb_helpers.go +++ b/backend/pkg/api/data_access/vdb_helpers.go @@ -41,28 +41,6 @@ func (d DataAccessService) getDashboardValidators(ctx context.Context, dashboard return dashboardId.Validators, nil } -func (d DataAccessService) calculateTotalEfficiency(attestationEff, proposalEff, syncEff sql.NullFloat64) float64 { - efficiency := float64(0) - - if !attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { - efficiency = 0 - } else if attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { - efficiency = attestationEff.Float64 * 100.0 - } else if attestationEff.Valid && proposalEff.Valid && !syncEff.Valid { - efficiency = ((56.0 / 64.0 * attestationEff.Float64) + (8.0 / 64.0 * proposalEff.Float64)) * 100.0 - } else if attestationEff.Valid && !proposalEff.Valid && syncEff.Valid { - efficiency = ((62.0 / 64.0 * attestationEff.Float64) + (2.0 / 64.0 * syncEff.Float64)) * 100.0 - } else { - efficiency = (((54.0 / 64.0) * attestationEff.Float64) + ((8.0 / 64.0) * proposalEff.Float64) + ((2.0 / 64.0) * syncEff.Float64)) * 100.0 - } - - if efficiency < 0 { - efficiency = 0 - } - - return efficiency -} - func (d DataAccessService) calculateChartEfficiency(efficiencyType enums.VDBSummaryChartEfficiencyType, row *t.VDBValidatorSummaryChartRow) (float64, error) { efficiency := float64(0) switch efficiencyType { @@ -81,7 +59,7 @@ func (d DataAccessService) calculateChartEfficiency(efficiencyType enums.VDBSumm syncEfficiency.Valid = true } - efficiency = 
d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) case enums.VDBSummaryChartAttestation: if row.AttestationIdealReward > 0 { efficiency = (row.AttestationReward / row.AttestationIdealReward) * 100 diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index cbf0a4b64..d152d6cac 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -520,7 +520,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d syncEfficiency.Float64 = float64(queryResult.SyncExecuted) / float64(queryResult.SyncScheduled) syncEfficiency.Valid = true } - *efficiency = d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + *efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) return nil }) diff --git a/backend/pkg/api/data_access/vdb_rocket_pool.go b/backend/pkg/api/data_access/vdb_rocket_pool.go index fc2cb963a..90a94d3af 100644 --- a/backend/pkg/api/data_access/vdb_rocket_pool.go +++ b/backend/pkg/api/data_access/vdb_rocket_pool.go @@ -17,11 +17,6 @@ func (d *DataAccessService) GetValidatorDashboardTotalRocketPool(ctx context.Con return d.dummy.GetValidatorDashboardTotalRocketPool(ctx, dashboardId, search) } -func (d *DataAccessService) GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) { - // TODO @DATA-ACCESS - return d.dummy.GetValidatorDashboardNodeRocketPool(ctx, dashboardId, node) -} - func (d *DataAccessService) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) { // TODO 
@DATA-ACCESS return d.dummy.GetValidatorDashboardRocketPoolMinipools(ctx, dashboardId, node, cursor, colSort, search, limit) diff --git a/backend/pkg/api/data_access/vdb_summary.go b/backend/pkg/api/data_access/vdb_summary.go index 58e29ad2a..f4216ec7b 100644 --- a/backend/pkg/api/data_access/vdb_summary.go +++ b/backend/pkg/api/data_access/vdb_summary.go @@ -90,7 +90,7 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da if err != nil { return nil, nil, err } - averageNetworkEfficiency := d.calculateTotalEfficiency( + averageNetworkEfficiency := utils.CalculateTotalEfficiency( efficiency.AttestationEfficiency[period], efficiency.ProposalEfficiency[period], efficiency.SyncEfficiency[period]) // ------------------------------------------------------------------------------------------------------------------ @@ -366,7 +366,7 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da syncEfficiency.Float64 = float64(queryEntry.SyncExecuted) / float64(queryEntry.SyncScheduled) syncEfficiency.Valid = true } - resultEntry.Efficiency = d.calculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + resultEntry.Efficiency = utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) // Add the duties info to the total total.AttestationReward = total.AttestationReward.Add(queryEntry.AttestationReward) @@ -486,7 +486,7 @@ func (d *DataAccessService) GetValidatorDashboardSummary(ctx context.Context, da totalSyncEfficiency.Float64 = float64(total.SyncExecuted) / float64(total.SyncScheduled) totalSyncEfficiency.Valid = true } - totalEntry.Efficiency = d.calculateTotalEfficiency(totalAttestationEfficiency, totalProposerEfficiency, totalSyncEfficiency) + totalEntry.Efficiency = utils.CalculateTotalEfficiency(totalAttestationEfficiency, totalProposerEfficiency, totalSyncEfficiency) result = append([]t.VDBSummaryTableRow{totalEntry}, result...) 
} @@ -867,8 +867,8 @@ func (d *DataAccessService) internal_getElClAPR(ctx context.Context, dashboardId if err != nil { return decimal.Zero, 0, decimal.Zero, 0, err } - elIncomeFloat, _ := elIncome.Float64() - elAPR = ((elIncomeFloat / float64(aprDivisor)) / (float64(32e18) * float64(rewardsResultTable.ValidatorCount))) * 24.0 * 365.0 * 100.0 + elIncomeFloat, _ := elIncome.Float64() // EL income is in ETH + elAPR = ((elIncomeFloat / float64(aprDivisor)) / (float64(32) * float64(rewardsResultTable.ValidatorCount))) * 24.0 * 365.0 * 100.0 if math.IsNaN(elAPR) { elAPR = 0 } @@ -1021,7 +1021,7 @@ func (d *DataAccessService) GetValidatorDashboardSummaryChart(ctx context.Contex if err != nil { return nil, err } - averageNetworkEfficiency := d.calculateTotalEfficiency( + averageNetworkEfficiency := utils.CalculateTotalEfficiency( efficiency.AttestationEfficiency[enums.Last24h], efficiency.ProposalEfficiency[enums.Last24h], efficiency.SyncEfficiency[enums.Last24h]) for ts := range tsMap { @@ -1093,17 +1093,17 @@ func (d *DataAccessService) GetLatestExportedChartTs(ctx context.Context, aggreg var dateColumn string switch aggregation { case enums.IntervalEpoch: - table = "validator_dashboard_data_epoch" - dateColumn = "epoch_timestamp" + table = "view_validator_dashboard_data_epoch_max_ts" + dateColumn = "t" case enums.IntervalHourly: - table = "validator_dashboard_data_hourly" - dateColumn = "hour" + table = "view_validator_dashboard_data_hourly_max_ts" + dateColumn = "t" case enums.IntervalDaily: - table = "validator_dashboard_data_daily" - dateColumn = "day" + table = "view_validator_dashboard_data_daily_max_ts" + dateColumn = "t" case enums.IntervalWeekly: - table = "validator_dashboard_data_weekly" - dateColumn = "week" + table = "view_validator_dashboard_data_weekly_max_ts" + dateColumn = "t" default: return 0, fmt.Errorf("unexpected aggregation type: %v", aggregation) } diff --git a/backend/pkg/api/enums/enums.go b/backend/pkg/api/enums/enums.go index 
a0bd6997d..95ccd0edb 100644 --- a/backend/pkg/api/enums/enums.go +++ b/backend/pkg/api/enums/enums.go @@ -104,7 +104,6 @@ const ( Last24h Last7d Last30d - Last365d ) func (t TimePeriod) Int() int { @@ -123,27 +122,23 @@ func (TimePeriod) NewFromString(s string) TimePeriod { return Last7d case "last_30d": return Last30d - case "last_365d": - return Last365d default: return TimePeriod(-1) } } var TimePeriods = struct { - AllTime TimePeriod - Last1h TimePeriod - Last24h TimePeriod - Last7d TimePeriod - Last30d TimePeriod - Last365d TimePeriod + AllTime TimePeriod + Last1h TimePeriod + Last24h TimePeriod + Last7d TimePeriod + Last30d TimePeriod }{ AllTime, Last1h, Last24h, Last7d, Last30d, - Last365d, } func (t TimePeriod) Duration() time.Duration { @@ -157,8 +152,6 @@ func (t TimePeriod) Duration() time.Duration { return 7 * day case Last30d: return 30 * day - case Last365d: - return 365 * day default: return 0 } diff --git a/backend/pkg/api/enums/notifications_enums.go b/backend/pkg/api/enums/notifications_enums.go index 1fb78529e..033141edd 100644 --- a/backend/pkg/api/enums/notifications_enums.go +++ b/backend/pkg/api/enums/notifications_enums.go @@ -166,7 +166,7 @@ func (NotificationClientsColumn) NewFromString(s string) NotificationClientsColu func (c NotificationClientsColumn) ToString() string { switch c { case NotificationClientName: - return "client_name" + return "client" case NotificationClientTimestamp: return "epoch" default: diff --git a/backend/pkg/api/enums/validator_dashboard_enums.go b/backend/pkg/api/enums/validator_dashboard_enums.go index fcbfec71b..58e87f337 100644 --- a/backend/pkg/api/enums/validator_dashboard_enums.go +++ b/backend/pkg/api/enums/validator_dashboard_enums.go @@ -270,6 +270,61 @@ var VDBManageValidatorsColumns = struct { VDBManageValidatorsWithdrawalCredential, } +// ---------------- +// Validator Dashboard Manage Validators Table + +type VDBMobileValidatorsColumn int + +var _ EnumFactory[VDBMobileValidatorsColumn] = 
VDBMobileValidatorsColumn(0) + +const ( + VDBMobileValidatorsIndex VDBMobileValidatorsColumn = iota + VDBMobileValidatorsPublicKey + VDBMobileValidatorsBalance + VDBMobileValidatorsStatus + VDBMobileValidatorsWithdrawalCredential + VDBMobileValidatorsEfficiency +) + +func (c VDBMobileValidatorsColumn) Int() int { + return int(c) +} + +func (VDBMobileValidatorsColumn) NewFromString(s string) VDBMobileValidatorsColumn { + switch s { + case "index": + return VDBMobileValidatorsIndex + case "public_key": + return VDBMobileValidatorsPublicKey + case "balance": + return VDBMobileValidatorsBalance + case "status": + return VDBMobileValidatorsStatus + case "withdrawal_credential": + return VDBMobileValidatorsWithdrawalCredential + case "efficiency": + return VDBMobileValidatorsEfficiency + default: + return VDBMobileValidatorsColumn(-1) + } +} + +var VDBMobileValidatorsColumns = struct { + Index VDBManageValidatorsColumn + PublicKey VDBManageValidatorsColumn + Balance VDBManageValidatorsColumn + Status VDBManageValidatorsColumn + WithdrawalCredential VDBManageValidatorsColumn + Efficiency VDBMobileValidatorsColumn +}{ + VDBManageValidatorsIndex, + VDBManageValidatorsPublicKey, + VDBManageValidatorsBalance, + VDBManageValidatorsStatus, + VDBManageValidatorsWithdrawalCredential, + VDBMobileValidatorsEfficiency, +} + // ---------------- // Validator Dashboard Archived Reasons diff --git a/backend/pkg/api/handlers/auth.go b/backend/pkg/api/handlers/auth.go index 777b84ca9..3f3c2f3b3 100644 --- a/backend/pkg/api/handlers/auth.go +++ b/backend/pkg/api/handlers/auth.go @@ -3,14 +3,10 @@ package handlers import ( "cmp" "context" - "crypto/sha256" - "encoding/binary" - "encoding/hex" "errors" "fmt" "html" "net/http" - "strconv" "strings" "time" @@ -41,7 +37,7 @@ const authEmailExpireTime = time.Minute * 30 type ctxKey string const ctxUserIdKey ctxKey = "user_id" -const ctxIsMockEnabledKey ctxKey = "is_mock_enabled" +const ctxIsMockedKey ctxKey = "is_mocked" var errBadCredentials = 
newUnauthorizedErr("invalid email or password") @@ -86,7 +82,7 @@ func (h *HandlerService) purgeAllSessionsForUser(ctx context.Context, userId uin // TODO move to service? func (h *HandlerService) sendConfirmationEmail(ctx context.Context, userId uint64, email string) error { // 1. check last confirmation time to enforce ratelimit - lastTs, err := h.dai.GetEmailConfirmationTime(ctx, userId) + lastTs, err := h.daService.GetEmailConfirmationTime(ctx, userId) if err != nil { return errors.New("error getting confirmation-ts") } @@ -96,7 +92,7 @@ func (h *HandlerService) sendConfirmationEmail(ctx context.Context, userId uint6 // 2. update confirmation hash (before sending so there's no hash mismatch on failure) confirmationHash := utils.RandomString(40) - err = h.dai.UpdateEmailConfirmationHash(ctx, userId, email, confirmationHash) + err = h.daService.UpdateEmailConfirmationHash(ctx, userId, email, confirmationHash) if err != nil { return errors.New("error updating confirmation hash") } @@ -117,7 +113,7 @@ Best regards, } // 4. update confirmation time (only after mail was sent) - err = h.dai.UpdateEmailConfirmationTime(ctx, userId) + err = h.daService.UpdateEmailConfirmationTime(ctx, userId) if err != nil { // shouldn't present this as error to user, confirmation works fine log.Error(err, "error updating email confirmation time, rate limiting won't be enforced", 0, nil) @@ -129,7 +125,7 @@ Best regards, func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint64, email string) error { // 0. check if password resets are allowed // (can be forbidden by admin (not yet in v2)) - passwordResetAllowed, err := h.dai.IsPasswordResetAllowed(ctx, userId) + passwordResetAllowed, err := h.daService.IsPasswordResetAllowed(ctx, userId) if err != nil { return err } @@ -138,7 +134,7 @@ func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint } // 1. 
check last confirmation time to enforce ratelimit - lastTs, err := h.dai.GetPasswordResetTime(ctx, userId) + lastTs, err := h.daService.GetPasswordResetTime(ctx, userId) if err != nil { return errors.New("error getting confirmation-ts") } @@ -148,7 +144,7 @@ func (h *HandlerService) sendPasswordResetEmail(ctx context.Context, userId uint // 2. update reset hash (before sending so there's no hash mismatch on failure) resetHash := utils.RandomString(40) - err = h.dai.UpdatePasswordResetHash(ctx, userId, resetHash) + err = h.daService.UpdatePasswordResetHash(ctx, userId, resetHash) if err != nil { return errors.New("error updating confirmation hash") } @@ -169,7 +165,7 @@ Best regards, } // 4. update reset time (only after mail was sent) - err = h.dai.UpdatePasswordResetTime(ctx, userId) + err = h.daService.UpdatePasswordResetTime(ctx, userId) if err != nil { // shouldn't present this as error to user, reset works fine log.Error(err, "error updating password reset time, rate limiting won't be enforced", 0, nil) @@ -198,7 +194,7 @@ func (h *HandlerService) GetUserIdByApiKey(r *http.Request) (uint64, error) { if apiKey == "" { return 0, newUnauthorizedErr("missing api key") } - userId, err := h.dai.GetUserIdByApiKey(r.Context(), apiKey) + userId, err := h.daService.GetUserIdByApiKey(r.Context(), apiKey) if errors.Is(err, dataaccess.ErrNotFound) { err = newUnauthorizedErr("api key not found") } @@ -247,7 +243,7 @@ func (h *HandlerService) InternalPostUsers(w http.ResponseWriter, r *http.Reques return } - _, err := h.dai.GetUserByEmail(r.Context(), email) + _, err := h.daService.GetUserByEmail(r.Context(), email) if !errors.Is(err, dataaccess.ErrNotFound) { if err == nil { returnConflict(w, r, errors.New("email already registered")) @@ -270,7 +266,7 @@ func (h *HandlerService) InternalPostUsers(w http.ResponseWriter, r *http.Reques } // add user - userId, err := h.dai.CreateUser(r.Context(), email, string(passwordHash)) + userId, err := h.daService.CreateUser(r.Context(), 
email, string(passwordHash)) if err != nil { handleErr(w, r, err) return @@ -295,12 +291,12 @@ func (h *HandlerService) InternalPostUserConfirm(w http.ResponseWriter, r *http. return } - userId, err := h.dai.GetUserIdByConfirmationHash(r.Context(), confirmationHash) + userId, err := h.daService.GetUserIdByConfirmationHash(r.Context(), confirmationHash) if err != nil { handleErr(w, r, err) return } - confirmationTime, err := h.dai.GetEmailConfirmationTime(r.Context(), userId) + confirmationTime, err := h.daService.GetEmailConfirmationTime(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -310,7 +306,7 @@ func (h *HandlerService) InternalPostUserConfirm(w http.ResponseWriter, r *http. return } - err = h.dai.UpdateUserEmail(r.Context(), userId) + err = h.daService.UpdateUserEmail(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -342,7 +338,7 @@ func (h *HandlerService) InternalPostUserPasswordReset(w http.ResponseWriter, r return } - userId, err := h.dai.GetUserByEmail(r.Context(), email) + userId, err := h.daService.GetUserByEmail(r.Context(), email) if err != nil { if err == dataaccess.ErrNotFound { // don't leak if email is registered @@ -380,12 +376,12 @@ func (h *HandlerService) InternalPostUserPasswordResetHash(w http.ResponseWriter } // check token validity - userId, err := h.dai.GetUserIdByResetHash(r.Context(), resetToken) + userId, err := h.daService.GetUserIdByResetHash(r.Context(), resetToken) if err != nil { handleErr(w, r, err) return } - resetTime, err := h.dai.GetPasswordResetTime(r.Context(), userId) + resetTime, err := h.daService.GetPasswordResetTime(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -401,20 +397,20 @@ func (h *HandlerService) InternalPostUserPasswordResetHash(w http.ResponseWriter handleErr(w, r, errors.New("error hashing password")) return } - err = h.dai.UpdateUserPassword(r.Context(), userId, string(passwordHash)) + err = h.daService.UpdateUserPassword(r.Context(), userId, 
string(passwordHash)) if err != nil { handleErr(w, r, err) return } // if email is not confirmed, confirm since they clicked a link emailed to them - userInfo, err := h.dai.GetUserCredentialInfo(r.Context(), userId) + userInfo, err := h.daService.GetUserCredentialInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } if !userInfo.EmailConfirmed { - err = h.dai.UpdateUserEmail(r.Context(), userId) + err = h.daService.UpdateUserEmail(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -449,7 +445,7 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques } // fetch user - userId, err := h.dai.GetUserByEmail(r.Context(), email) + userId, err := h.daService.GetUserByEmail(r.Context(), email) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -457,7 +453,7 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques handleErr(w, r, err) return } - user, err := h.dai.GetUserCredentialInfo(r.Context(), userId) + user, err := h.daService.GetUserCredentialInfo(r.Context(), userId) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -532,7 +528,7 @@ func (h *HandlerService) InternalPostMobileAuthorize(w http.ResponseWriter, r *h } // check if oauth app exists to validate whether redirect uri is valid - appInfo, err := h.dai.GetAppDataFromRedirectUri(req.RedirectURI) + appInfo, err := h.daService.GetAppDataFromRedirectUri(req.RedirectURI) if err != nil { callback := req.RedirectURI + "?error=invalid_request&error_description=missing_redirect_uri" + state http.Redirect(w, r, callback, http.StatusSeeOther) @@ -549,7 +545,7 @@ func (h *HandlerService) InternalPostMobileAuthorize(w http.ResponseWriter, r *h session := h.scs.Token(r.Context()) sanitizedDeviceName := html.EscapeString(clientName) - err = h.dai.AddUserDevice(userInfo.Id, utils.HashAndEncode(session+session), clientID, sanitizedDeviceName, appInfo.ID) + err = 
h.daService.AddUserDevice(userInfo.Id, utils.HashAndEncode(session+session), clientID, sanitizedDeviceName, appInfo.ID) if err != nil { log.Warnf("Error adding user device: %v", err) callback := req.RedirectURI + "?error=invalid_request&error_description=server_error" + state @@ -589,7 +585,7 @@ func (h *HandlerService) InternalPostMobileEquivalentExchange(w http.ResponseWri } // Get user info - user, err := h.dai.GetUserCredentialInfo(r.Context(), userID) + user, err := h.daService.GetUserCredentialInfo(r.Context(), userID) if err != nil { if errors.Is(err, dataaccess.ErrNotFound) { err = errBadCredentials @@ -612,7 +608,7 @@ func (h *HandlerService) InternalPostMobileEquivalentExchange(w http.ResponseWri // invalidate old refresh token and replace with hashed session id sanitizedDeviceName := html.EscapeString(req.DeviceName) - err = h.dai.MigrateMobileSession(refreshTokenHashed, utils.HashAndEncode(session+session), req.DeviceID, sanitizedDeviceName) // salted with session + err = h.daService.MigrateMobileSession(refreshTokenHashed, utils.HashAndEncode(session+session), req.DeviceID, sanitizedDeviceName) // salted with session if err != nil { handleErr(w, r, err) return @@ -653,7 +649,7 @@ func (h *HandlerService) InternalPostUsersMeNotificationSettingsPairedDevicesTok return } - err = h.dai.AddMobileNotificationToken(user.Id, deviceID, req.Token) + err = h.daService.AddMobileNotificationToken(user.Id, deviceID, req.Token) if err != nil { handleErr(w, r, err) return @@ -693,7 +689,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * return } - subscriptionCount, err := h.dai.GetAppSubscriptionCount(user.Id) + subscriptionCount, err := h.daService.GetAppSubscriptionCount(user.Id) if err != nil { handleErr(w, r, err) return @@ -724,7 +720,7 @@ func (h *HandlerService) InternalHandleMobilePurchase(w http.ResponseWriter, r * } } - err = h.dai.AddMobilePurchase(nil, user.Id, req, validationResult, "") + err = 
h.daService.AddMobilePurchase(nil, user.Id, req, validationResult, "") if err != nil { handleErr(w, r, err) return @@ -755,7 +751,7 @@ func (h *HandlerService) InternalDeleteUser(w http.ResponseWriter, r *http.Reque } // TODO allow if user has any subsciptions etc? - err = h.dai.RemoveUser(r.Context(), user.Id) + err = h.daService.RemoveUser(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -777,7 +773,7 @@ func (h *HandlerService) InternalPostUserEmail(w http.ResponseWriter, r *http.Re handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserCredentialInfo(r.Context(), user.Id) + userInfo, err := h.daService.GetUserCredentialInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -809,7 +805,7 @@ func (h *HandlerService) InternalPostUserEmail(w http.ResponseWriter, r *http.Re return } - _, err = h.dai.GetUserByEmail(r.Context(), newEmail) + _, err = h.daService.GetUserByEmail(r.Context(), newEmail) if !errors.Is(err, dataaccess.ErrNotFound) { if err == nil { handleErr(w, r, newConflictErr("email already registered")) @@ -856,7 +852,7 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. return } // user doesn't contain password, fetch from db - userData, err := h.dai.GetUserCredentialInfo(r.Context(), user.Id) + userData, err := h.daService.GetUserCredentialInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -892,7 +888,7 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. } // change password - err = h.dai.UpdateUserPassword(r.Context(), user.Id, string(passwordHash)) + err = h.daService.UpdateUserPassword(r.Context(), user.Id, string(passwordHash)) if err != nil { handleErr(w, r, err) return @@ -906,187 +902,3 @@ func (h *HandlerService) InternalPutUserPassword(w http.ResponseWriter, r *http. 
returnNoContent(w, r) } - -// Middlewares - -func hashUint64(data uint64) [32]byte { - // Convert uint64 to a byte slice - buf := make([]byte, 8) - binary.LittleEndian.PutUint64(buf, data) - - // Compute SHA-256 hash - hash := sha256.Sum256(buf) - return hash -} - -func checkHash(data uint64, hashStr string) bool { - // Decode the hexadecimal string into a byte slice - hashToCheck, err := hex.DecodeString(hashStr) - if err != nil { - return false - } - - // Hash the uint64 value - computedHash := hashUint64(data) - - // Compare the computed hash with the provided hash - return string(computedHash[:]) == string(hashToCheck) -} - -// returns a middleware that stores user id in context, using the provided function -func StoreUserIdMiddleware(next http.Handler, userIdFunc func(r *http.Request) (uint64, error)) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userId, err := userIdFunc(r) - if err != nil { - if errors.Is(err, errUnauthorized) { - // if next handler requires authentication, it should return 'unauthorized' itself - next.ServeHTTP(w, r) - } else { - handleErr(w, r, err) - } - return - } - - // if user id matches a given hash, allow access without checking dashboard access and return mock data - // TODO: move to config, exposing this in source code is a minor security risk for now - validHashes := []string{ - "2cab06069254b5555b617efa1d17f0748324270bb587b73422e6840d59ff322c", - "fc624cf355b84bc583661552982894621568b59c0a1c92ab0c1e03ed3bbf649b", - "03e7fb02cbc33eb45e98ab50b4bcad7fc338e5edfb5eca33ad9eb7d13d4ff106", - } - for _, hash := range validHashes { - if checkHash(userId, hash) { - ctx := r.Context() - ctx = context.WithValue(ctx, ctxIsMockEnabledKey, true) - r = r.WithContext(ctx) - } - } - ctx := r.Context() - ctx = context.WithValue(ctx, ctxUserIdKey, userId) - r = r.WithContext(ctx) - next.ServeHTTP(w, r) - }) -} - -func (h *HandlerService) StoreUserIdBySessionMiddleware(next http.Handler) http.Handler { - 
return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { - return h.GetUserIdBySession(r) - }) -} - -func (h *HandlerService) StoreUserIdByApiKeyMiddleware(next http.Handler) http.Handler { - return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { - return h.GetUserIdByApiKey(r) - }) -} - -// returns a middleware that checks if user has access to dashboard when a primary id is used -func (h *HandlerService) VDBAuthMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // if mock data is used, no need to check access - if isMockEnabled, ok := r.Context().Value(ctxIsMockEnabledKey).(bool); ok && isMockEnabled { - next.ServeHTTP(w, r) - return - } - var err error - dashboardId, err := strconv.ParseUint(mux.Vars(r)["dashboard_id"], 10, 64) - if err != nil { - // if primary id is not used, no need to check access - next.ServeHTTP(w, r) - return - } - // primary id is used -> user needs to have access to dashboard - - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - - // store user id in context - ctx := r.Context() - ctx = context.WithValue(ctx, ctxUserIdKey, userId) - r = r.WithContext(ctx) - - dashboardUser, err := h.dai.GetValidatorDashboardUser(r.Context(), types.VDBIdPrimary(dashboardId)) - if err != nil { - handleErr(w, r, err) - return - } - - if dashboardUser.UserId != userId { - // user does not have access to dashboard - // the proper error would be 403 Forbidden, but we don't want to leak information so we return 404 Not Found - handleErr(w, r, newNotFoundErr("dashboard with id %v not found", dashboardId)) - return - } - - next.ServeHTTP(w, r) - }) -} - -// Common middleware logic for checking user premium perks -func (h *HandlerService) PremiumPerkCheckMiddleware(next http.Handler, hasRequiredPerk func(premiumPerks types.PremiumPerks) bool) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { - // get user id from context - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - - // get user info - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - - // check if user has the required premium perk - if !hasRequiredPerk(userInfo.PremiumPerks) { - handleErr(w, r, newForbiddenErr("users premium perks do not allow usage of this endpoint")) - return - } - - next.ServeHTTP(w, r) - }) -} - -// Middleware for managing dashboards via API -func (h *HandlerService) ManageDashboardsViaApiCheckMiddleware(next http.Handler) http.Handler { - return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { - return premiumPerks.ManageDashboardViaApi - }) -} - -// Middleware for managing notifications via API -func (h *HandlerService) ManageNotificationsViaApiCheckMiddleware(next http.Handler) http.Handler { - return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { - return premiumPerks.ConfigureNotificationsViaApi - }) -} - -// middleware check to return if specified dashboard is not archived (and accessible) -func (h *HandlerService) VDBArchivedCheckMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - if len(dashboardId.Validators) > 0 { - next.ServeHTTP(w, r) - return - } - dashboard, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId.Id) - if err != nil { - handleErr(w, r, err) - return - } - if dashboard.IsArchived { - handleErr(w, r, newForbiddenErr("dashboard with id %v is archived", dashboardId)) - return - } - next.ServeHTTP(w, r) - }) -} diff --git a/backend/pkg/api/handlers/backward_compat.go b/backend/pkg/api/handlers/backward_compat.go index f02161a00..e9d823c69 100644 --- 
a/backend/pkg/api/handlers/backward_compat.go +++ b/backend/pkg/api/handlers/backward_compat.go @@ -43,7 +43,7 @@ func (h *HandlerService) getTokenByRefresh(r *http.Request, refreshToken string) log.Infof("refresh token: %v, claims: %v, hashed refresh: %v", refreshToken, unsafeClaims, refreshTokenHashed) // confirm all claims via db lookup and refreshtoken check - userID, err := h.dai.GetUserIdByRefreshToken(unsafeClaims.UserID, unsafeClaims.AppID, unsafeClaims.DeviceID, refreshTokenHashed) + userID, err := h.daService.GetUserIdByRefreshToken(unsafeClaims.UserID, unsafeClaims.AppID, unsafeClaims.DeviceID, refreshTokenHashed) if err != nil { if err == sql.ErrNoRows { return 0, "", dataaccess.ErrNotFound diff --git a/backend/pkg/api/handlers/handler_service.go b/backend/pkg/api/handlers/handler_service.go new file mode 100644 index 000000000..71a43e9ad --- /dev/null +++ b/backend/pkg/api/handlers/handler_service.go @@ -0,0 +1,547 @@ +package handlers + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "strconv" + + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/invopop/jsonschema" + + "github.com/alexedwards/scs/v2" + dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/services" + types "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type HandlerService struct { + daService dataaccess.DataAccessor + daDummy dataaccess.DataAccessor + scs *scs.SessionManager + isPostMachineMetricsEnabled bool // if more config options are needed, consider having the whole config in here +} + +func NewHandlerService(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAccessor, sessionManager *scs.SessionManager, enablePostMachineMetrics bool) *HandlerService { + if allNetworks == nil { + networks, err := dataAccessor.GetAllNetworks() + if err != nil { + log.Fatal(err, "error getting networks for 
handler", 0, nil) + } + allNetworks = networks + } + + return &HandlerService{ + daService: dataAccessor, + daDummy: dummy, + scs: sessionManager, + isPostMachineMetricsEnabled: enablePostMachineMetrics, + } +} + +// getDataAccessor returns the correct data accessor based on the request context. +// if the request is mocked, the data access dummy is returned; otherwise the data access service. +// should only be used if getting mocked data for the endpoint is appropriate +func (h *HandlerService) getDataAccessor(r *http.Request) dataaccess.DataAccessor { + if isMocked(r) { + return h.daDummy + } + return h.daService +} + +// all networks available in the system, filled on startup in NewHandlerService +var allNetworks []types.NetworkInfo + +// -------------------------------------- +// errors + +var ( + errMsgParsingId = errors.New("error parsing parameter 'dashboard_id'") + errBadRequest = errors.New("bad request") + errInternalServer = errors.New("internal server error") + errUnauthorized = errors.New("unauthorized") + errForbidden = errors.New("forbidden") + errConflict = errors.New("conflict") + errTooManyRequests = errors.New("too many requests") + errGone = errors.New("gone") +) + +// -------------------------------------- +// utility functions + +type validatorSet struct { + Indexes []types.VDBValidator + PublicKeys []string +} + +// parseDashboardId is a helper function to validate the string dashboard id param. 
+func parseDashboardId(id string) (interface{}, error) { + var v validationError + if reInteger.MatchString(id) { + // given id is a normal id + id := v.checkUint(id, "dashboard_id") + if v.hasErrors() { + return nil, v + } + return types.VDBIdPrimary(id), nil + } + if reValidatorDashboardPublicId.MatchString(id) { + // given id is a public id + return types.VDBIdPublic(id), nil + } + // given id must be an encoded set of validators + decodedId, err := base64.RawURLEncoding.DecodeString(id) + if err != nil { + return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) + } + indexes, publicKeys := v.checkValidatorList(string(decodedId), forbidEmpty) + if v.hasErrors() { + return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) + } + return validatorSet{Indexes: indexes, PublicKeys: publicKeys}, nil +} + +// getDashboardId is a helper function to convert the dashboard id param to a VDBId. +// precondition: dashboardIdParam must be a valid dashboard id and either a primary id, public id, or list of validators. 
+func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam interface{}) (*types.VDBId, error) { + switch dashboardId := dashboardIdParam.(type) { + case types.VDBIdPrimary: + return &types.VDBId{Id: dashboardId, Validators: nil}, nil + case types.VDBIdPublic: + dashboardInfo, err := h.daService.GetValidatorDashboardPublicId(ctx, dashboardId) + if err != nil { + return nil, err + } + return &types.VDBId{Id: types.VDBIdPrimary(dashboardInfo.DashboardId), Validators: nil, AggregateGroups: !dashboardInfo.ShareSettings.ShareGroups}, nil + case validatorSet: + validators, err := h.daService.GetValidatorsFromSlices(dashboardId.Indexes, dashboardId.PublicKeys) + if err != nil { + return nil, err + } + if len(validators) == 0 { + return nil, newNotFoundErr("no validators found for given id") + } + if len(validators) > maxValidatorsInList { + return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) + } + return &types.VDBId{Validators: validators}, nil + } + return nil, errMsgParsingId +} + +// handleDashboardId is a helper function to both validate the dashboard id param and convert it to a VDBId. +// it should be used as the last validation step for all internal dashboard GET-handlers. +// Modifying handlers (POST, PUT, DELETE) should only accept primary dashboard ids and just use checkPrimaryDashboardId. 
+func (h *HandlerService) handleDashboardId(ctx context.Context, param string) (*types.VDBId, error) { + // validate dashboard id param + dashboardIdParam, err := parseDashboardId(param) + if err != nil { + return nil, err + } + // convert to VDBId + dashboardId, err := h.getDashboardId(ctx, dashboardIdParam) + if err != nil { + return nil, err + } + + return dashboardId, nil +} + +const chartDatapointLimit uint64 = 200 + +type ChartTimeDashboardLimits struct { + MinAllowedTs uint64 + LatestExportedTs uint64 + MaxAllowedInterval uint64 +} + +// helper function to retrieve allowed chart timestamp boundaries according to the users premium perks at the current point in time +func (h *HandlerService) getCurrentChartTimeLimitsForDashboard(ctx context.Context, dashboardId *types.VDBId, aggregation enums.ChartAggregation) (ChartTimeDashboardLimits, error) { + limits := ChartTimeDashboardLimits{} + var err error + premiumPerks, err := h.getDashboardPremiumPerks(ctx, *dashboardId) + if err != nil { + return limits, err + } + + maxAge := getMaxChartAge(aggregation, premiumPerks.ChartHistorySeconds) // can be max int for unlimited, always check for underflows + if maxAge == 0 { + return limits, newConflictErr("requested aggregation is not available for dashboard owner's premium subscription") + } + limits.LatestExportedTs, err = h.daService.GetLatestExportedChartTs(ctx, aggregation) + if err != nil { + return limits, err + } + limits.MinAllowedTs = limits.LatestExportedTs - min(maxAge, limits.LatestExportedTs) // min to prevent underflow + secondsPerEpoch := uint64(12 * 32) // TODO: fetch dashboards chain id and use correct value for network once available + limits.MaxAllowedInterval = chartDatapointLimit*uint64(aggregation.Duration(secondsPerEpoch).Seconds()) - 1 // -1 to make sure we don't go over the limit + + return limits, nil +} + +// getDashboardPremiumPerks gets the premium perks of the dashboard OWNER or if it's a guest dashboard, it returns free tier premium perks 
+func (h *HandlerService) getDashboardPremiumPerks(ctx context.Context, id types.VDBId) (*types.PremiumPerks, error) { + // for guest dashboards, return free tier perks + if id.Validators != nil { + perk, err := h.daService.GetFreeTierPerks(ctx) + if err != nil { + return nil, err + } + return perk, nil + } + // could be made into a single query if needed + dashboardUser, err := h.daService.GetValidatorDashboardUser(ctx, id.Id) + if err != nil { + return nil, err + } + userInfo, err := h.daService.GetUserInfo(ctx, dashboardUser.UserId) + if err != nil { + return nil, err + } + + return &userInfo.PremiumPerks, nil +} + +// getMaxChartAge returns the maximum age of a chart in seconds based on the given aggregation type and premium perks +func getMaxChartAge(aggregation enums.ChartAggregation, perkSeconds types.ChartHistorySeconds) uint64 { + aggregations := enums.ChartAggregations + switch aggregation { + case aggregations.Epoch: + return perkSeconds.Epoch + case aggregations.Hourly: + return perkSeconds.Hourly + case aggregations.Daily: + return perkSeconds.Daily + case aggregations.Weekly: + return perkSeconds.Weekly + default: + return 0 + } +} + +func isUserAdmin(user *types.UserInfo) bool { + if user == nil { + return false + } + return user.UserGroup == types.UserGroupAdmin +} + +// -------------------------------------- +// Response handling + +func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { + w.Header().Set("Content-Type", "application/json") + if response == nil { + w.WriteHeader(statusCode) + return + } + jsonData, err := json.Marshal(response) + if err != nil { + logApiError(r, fmt.Errorf("error encoding json data: %w", err), 0, + log.Fields{ + "data": fmt.Sprintf("%+v", response), + }) + w.WriteHeader(http.StatusInternalServerError) + response = types.ApiErrorResponse{ + Error: "error encoding json data", + } + if err = json.NewEncoder(w).Encode(response); err != nil { + // there seems to be an error with 
the lib + logApiError(r, fmt.Errorf("error encoding error response after failed encoding: %w", err), 0) + } + return + } + w.WriteHeader(statusCode) + if _, err = w.Write(jsonData); err != nil { + // already returned wrong status code to user, can't prevent that + logApiError(r, fmt.Errorf("error writing response data: %w", err), 0) + } +} + +func returnError(w http.ResponseWriter, r *http.Request, code int, err error) { + response := types.ApiErrorResponse{ + Error: err.Error(), + } + writeResponse(w, r, code, response) +} + +func returnOk(w http.ResponseWriter, r *http.Request, data interface{}) { + writeResponse(w, r, http.StatusOK, data) +} + +func returnCreated(w http.ResponseWriter, r *http.Request, data interface{}) { + writeResponse(w, r, http.StatusCreated, data) +} + +func returnNoContent(w http.ResponseWriter, r *http.Request) { + writeResponse(w, r, http.StatusNoContent, nil) +} + +// Errors + +func returnBadRequest(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusBadRequest, err) +} + +func returnUnauthorized(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusUnauthorized, err) +} + +func returnNotFound(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusNotFound, err) +} + +func returnConflict(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusConflict, err) +} + +func returnForbidden(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusForbidden, err) +} + +func returnTooManyRequests(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusTooManyRequests, err) +} + +func returnGone(w http.ResponseWriter, r *http.Request, err error) { + returnError(w, r, http.StatusGone, err) +} + +const maxBodySize = 10 * 1024 + +func logApiError(r *http.Request, err error, callerSkip int, additionalInfos ...log.Fields) { + requestFields := log.Fields{ + "request_endpoint": 
r.Method + " " + r.URL.Path, + } + if len(r.URL.RawQuery) > 0 { + requestFields["request_query"] = r.URL.RawQuery + } + if body, _ := io.ReadAll(io.LimitReader(r.Body, maxBodySize)); len(body) > 0 { + requestFields["request_body"] = string(body) + } + if userId, _ := GetUserIdByContext(r); userId != 0 { + requestFields["request_user_id"] = userId + } + log.Error(err, "error handling request", callerSkip+1, append(additionalInfos, requestFields)...) +} + +func handleErr(w http.ResponseWriter, r *http.Request, err error) { + _, isValidationError := err.(validationError) + switch { + case isValidationError, errors.Is(err, errBadRequest): + returnBadRequest(w, r, err) + case errors.Is(err, dataaccess.ErrNotFound): + returnNotFound(w, r, err) + case errors.Is(err, errUnauthorized): + returnUnauthorized(w, r, err) + case errors.Is(err, errForbidden): + returnForbidden(w, r, err) + case errors.Is(err, errConflict): + returnConflict(w, r, err) + case errors.Is(err, services.ErrWaiting): + returnError(w, r, http.StatusServiceUnavailable, err) + case errors.Is(err, errTooManyRequests): + returnTooManyRequests(w, r, err) + case errors.Is(err, errGone): + returnGone(w, r, err) + case errors.Is(err, context.Canceled): + if r.Context().Err() != context.Canceled { // only return error if the request context was canceled + logApiError(r, err, 1) + returnError(w, r, http.StatusInternalServerError, err) + } + default: + logApiError(r, err, 1) + // TODO: don't return the error message to the user in production + returnError(w, r, http.StatusInternalServerError, err) + } +} + +// -------------------------------------- +// Error Helpers + +func errWithMsg(err error, format string, args ...interface{}) error { + return fmt.Errorf("%w: %s", err, fmt.Sprintf(format, args...)) +} + +//nolint:nolintlint +//nolint:unparam +func newBadRequestErr(format string, args ...interface{}) error { + return errWithMsg(errBadRequest, format, args...) 
+} + +//nolint:unparam +func newInternalServerErr(format string, args ...interface{}) error { + return errWithMsg(errInternalServer, format, args...) +} + +//nolint:unparam +func newUnauthorizedErr(format string, args ...interface{}) error { + return errWithMsg(errUnauthorized, format, args...) +} + +func newForbiddenErr(format string, args ...interface{}) error { + return errWithMsg(errForbidden, format, args...) +} + +//nolint:unparam +func newConflictErr(format string, args ...interface{}) error { + return errWithMsg(errConflict, format, args...) +} + +//nolint:nolintlint +//nolint:unparam +func newNotFoundErr(format string, args ...interface{}) error { + return errWithMsg(dataaccess.ErrNotFound, format, args...) +} + +func newTooManyRequestsErr(format string, args ...interface{}) error { + return errWithMsg(errTooManyRequests, format, args...) +} + +func newGoneErr(format string, args ...interface{}) error { + return errWithMsg(errGone, format, args...) +} + +// -------------------------------------- +// misc. 
helper functions + +// maps different types of validator dashboard summary validators to a common format +func mapVDBIndices(indices interface{}) ([]types.VDBSummaryValidatorsData, error) { + if indices == nil { + return nil, errors.New("no data found when mapping") + } + + switch v := indices.(type) { + case *types.VDBGeneralSummaryValidators: + // deposited, online, offline, slashing, slashed, exited, withdrawn, pending, exiting, withdrawing + return []types.VDBSummaryValidatorsData{ + mapUintSlice("deposited", v.Deposited), + mapUintSlice("online", v.Online), + mapUintSlice("offline", v.Offline), + mapUintSlice("slashing", v.Slashing), + mapUintSlice("slashed", v.Slashed), + mapUintSlice("exited", v.Exited), + mapUintSlice("withdrawn", v.Withdrawn), + mapIndexTimestampSlice("pending", v.Pending), + mapIndexTimestampSlice("exiting", v.Exiting), + mapIndexTimestampSlice("withdrawing", v.Withdrawing), + }, nil + + case *types.VDBSyncSummaryValidators: + return []types.VDBSummaryValidatorsData{ + mapUintSlice("sync_current", v.Current), + mapUintSlice("sync_upcoming", v.Upcoming), + mapSlice("sync_past", v.Past, + func(v types.VDBValidatorSyncPast) (uint64, []uint64) { return v.Index, []uint64{v.Count} }, + ), + }, nil + + case *types.VDBSlashingsSummaryValidators: + return []types.VDBSummaryValidatorsData{ + mapSlice("got_slashed", v.GotSlashed, + func(v types.VDBValidatorGotSlashed) (uint64, []uint64) { return v.Index, []uint64{v.SlashedBy} }, + ), + mapSlice("has_slashed", v.HasSlashed, + func(v types.VDBValidatorHasSlashed) (uint64, []uint64) { return v.Index, v.SlashedIndices }, + ), + }, nil + + case *types.VDBProposalSummaryValidators: + return []types.VDBSummaryValidatorsData{ + mapIndexBlocksSlice("proposal_proposed", v.Proposed), + mapIndexBlocksSlice("proposal_missed", v.Missed), + }, nil + + default: + return nil, fmt.Errorf("unsupported indices type") + } +} + +// maps different types of validator dashboard summary validators to a common format +func 
mapSlice[T any](category string, validators []T, getIndexAndDutyObjects func(validator T) (index uint64, dutyObjects []uint64)) types.VDBSummaryValidatorsData { + validatorsData := make([]types.VDBSummaryValidator, len(validators)) + for i, validator := range validators { + index, dutyObjects := getIndexAndDutyObjects(validator) + validatorsData[i] = types.VDBSummaryValidator{Index: index, DutyObjects: dutyObjects} + } + return types.VDBSummaryValidatorsData{ + Category: category, + Validators: validatorsData, + } +} +func mapUintSlice(category string, validators []uint64) types.VDBSummaryValidatorsData { + return mapSlice(category, validators, + func(v uint64) (uint64, []uint64) { return v, nil }, + ) +} + +func mapIndexTimestampSlice(category string, validators []types.IndexTimestamp) types.VDBSummaryValidatorsData { + return mapSlice(category, validators, + func(v types.IndexTimestamp) (uint64, []uint64) { return v.Index, []uint64{v.Timestamp} }, + ) +} + +func mapIndexBlocksSlice(category string, validators []types.IndexBlocks) types.VDBSummaryValidatorsData { + return mapSlice(category, validators, + func(v types.IndexBlocks) (uint64, []uint64) { return v.Index, v.Blocks }, + ) +} + +// -------------------------------------- +// intOrString is a custom type that can be unmarshalled from either an int or a string (strings will also be parsed to int if possible). +// if unmarshaling throws no errors one of the two fields will be set, the other will be nil. 
+type intOrString struct { + intValue *uint64 + strValue *string +} + +func (v *intOrString) UnmarshalJSON(data []byte) error { + // Attempt to unmarshal as uint64 first + var intValue uint64 + if err := json.Unmarshal(data, &intValue); err == nil { + v.intValue = &intValue + return nil + } + + // If unmarshalling as uint64 fails, try to unmarshal as string + var strValue string + if err := json.Unmarshal(data, &strValue); err == nil { + if parsedInt, err := strconv.ParseUint(strValue, 10, 64); err == nil { + v.intValue = &parsedInt + } else { + v.strValue = &strValue + } + return nil + } + + // If both unmarshalling attempts fail, return an error + return fmt.Errorf("failed to unmarshal intOrString from json: %s", string(data)) +} + +func (v intOrString) String() string { + if v.intValue != nil { + return strconv.FormatUint(*v.intValue, 10) + } + if v.strValue != nil { + return *v.strValue + } + return "" +} + +func (intOrString) JSONSchema() *jsonschema.Schema { + return &jsonschema.Schema{ + OneOf: []*jsonschema.Schema{ + {Type: "string"}, {Type: "integer"}, + }, + } +} + +func isMocked(r *http.Request) bool { + isMocked, ok := r.Context().Value(ctxIsMockedKey).(bool) + return ok && isMocked +} diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/input_validation.go similarity index 50% rename from backend/pkg/api/handlers/common.go rename to backend/pkg/api/handlers/input_validation.go index 3f95d5854..eca791aa8 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/input_validation.go @@ -3,10 +3,7 @@ package handlers import ( "bytes" "cmp" - "context" - "encoding/base64" "encoding/json" - "errors" "fmt" "io" "net/http" @@ -16,46 +13,14 @@ import ( "strings" "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gorilla/mux" "github.com/invopop/jsonschema" 
"github.com/shopspring/decimal" "github.com/xeipuuv/gojsonschema" - - "github.com/alexedwards/scs/v2" - dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" - "github.com/gobitfly/beaconchain/pkg/api/enums" - "github.com/gobitfly/beaconchain/pkg/api/services" - types "github.com/gobitfly/beaconchain/pkg/api/types" ) -type HandlerService struct { - dai dataaccess.DataAccessor - dummy dataaccess.DataAccessor - scs *scs.SessionManager - isPostMachineMetricsEnabled bool // if more config options are needed, consider having the whole config in here -} - -func NewHandlerService(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAccessor, sessionManager *scs.SessionManager, enablePostMachineMetrics bool) *HandlerService { - if allNetworks == nil { - networks, err := dataAccessor.GetAllNetworks() - if err != nil { - log.Fatal(err, "error getting networks for handler", 0, nil) - } - allNetworks = networks - } - - return &HandlerService{ - dai: dataAccessor, - dummy: dummy, - scs: sessionManager, - isPostMachineMetricsEnabled: enablePostMachineMetrics, - } -} - -// all networks available in the system, filled on startup in NewHandlerService -var allNetworks []types.NetworkInfo - // -------------------------------------- var ( @@ -68,7 +33,6 @@ var ( reEthereumAddress = regexp.MustCompile(`^(0x)?[0-9a-fA-F]{40}$`) reWithdrawalCredential = regexp.MustCompile(`^(0x0[01])?[0-9a-fA-F]{62}$`) reEnsName = regexp.MustCompile(`^.+\.eth$`) - reNonEmpty = regexp.MustCompile(`^\s*\S.*$`) reGraffiti = regexp.MustCompile(`^.{2,}$`) // at least 2 characters, so that queries won't time out reCursor = regexp.MustCompile(`^[A-Za-z0-9-_]+$`) // has to be base64 reEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") @@ -93,23 +57,6 @@ const ( MaxArchivedDashboardsCount = 10 ) -var ( - errMsgParsingId = errors.New("error parsing parameter 'dashboard_id'") - 
errBadRequest = errors.New("bad request") - errInternalServer = errors.New("internal server error") - errUnauthorized = errors.New("unauthorized") - errForbidden = errors.New("forbidden") - errConflict = errors.New("conflict") - errTooManyRequests = errors.New("too many requests") - errGone = errors.New("gone") -) - -type Paging struct { - cursor string - limit uint64 - search string -} - // All changes to common functions MUST NOT break any public handler behavior (not in effect yet) // -------------------------------------- @@ -146,6 +93,8 @@ func (v *validationError) hasErrors() bool { return v != nil && len(*v) > 0 } +// -------------------------------------- + func (v *validationError) checkRegex(regex *regexp.Regexp, param, paramName string) string { if !regex.MatchString(param) { v.add(paramName, fmt.Sprintf(`given value '%s' has incorrect format`, param)) @@ -307,143 +256,10 @@ func (v *validationError) checkAdConfigurationKeys(keysString string) []string { return keys } -type validatorSet struct { - Indexes []types.VDBValidator - PublicKeys []string -} - -// parseDashboardId is a helper function to validate the string dashboard id param. 
-func parseDashboardId(id string) (interface{}, error) { - var v validationError - if reInteger.MatchString(id) { - // given id is a normal id - id := v.checkUint(id, "dashboard_id") - if v.hasErrors() { - return nil, v - } - return types.VDBIdPrimary(id), nil - } - if reValidatorDashboardPublicId.MatchString(id) { - // given id is a public id - return types.VDBIdPublic(id), nil - } - // given id must be an encoded set of validators - decodedId, err := base64.RawURLEncoding.DecodeString(id) - if err != nil { - return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) - } - indexes, publicKeys := v.checkValidatorList(string(decodedId), forbidEmpty) - if v.hasErrors() { - return nil, newBadRequestErr("given value '%s' is not a valid dashboard id", id) - } - return validatorSet{Indexes: indexes, PublicKeys: publicKeys}, nil -} - -// getDashboardId is a helper function to convert the dashboard id param to a VDBId. -// precondition: dashboardIdParam must be a valid dashboard id and either a primary id, public id, or list of validators. 
-func (h *HandlerService) getDashboardId(ctx context.Context, dashboardIdParam interface{}) (*types.VDBId, error) { - switch dashboardId := dashboardIdParam.(type) { - case types.VDBIdPrimary: - return &types.VDBId{Id: dashboardId, Validators: nil}, nil - case types.VDBIdPublic: - dashboardInfo, err := h.dai.GetValidatorDashboardPublicId(ctx, dashboardId) - if err != nil { - return nil, err - } - return &types.VDBId{Id: types.VDBIdPrimary(dashboardInfo.DashboardId), Validators: nil, AggregateGroups: !dashboardInfo.ShareSettings.ShareGroups}, nil - case validatorSet: - validators, err := h.dai.GetValidatorsFromSlices(dashboardId.Indexes, dashboardId.PublicKeys) - if err != nil { - return nil, err - } - if len(validators) == 0 { - return nil, newNotFoundErr("no validators found for given id") - } - if len(validators) > maxValidatorsInList { - return nil, newBadRequestErr("too many validators in list, maximum is %d", maxValidatorsInList) - } - return &types.VDBId{Validators: validators}, nil - } - return nil, errMsgParsingId -} - -// handleDashboardId is a helper function to both validate the dashboard id param and convert it to a VDBId. -// it should be used as the last validation step for all internal dashboard GET-handlers. -// Modifying handlers (POST, PUT, DELETE) should only accept primary dashboard ids and just use checkPrimaryDashboardId. 
-func (h *HandlerService) handleDashboardId(ctx context.Context, param string) (*types.VDBId, error) { - // validate dashboard id param - dashboardIdParam, err := parseDashboardId(param) - if err != nil { - return nil, err - } - // convert to VDBId - dashboardId, err := h.getDashboardId(ctx, dashboardIdParam) - if err != nil { - return nil, err - } - - return dashboardId, nil -} - -const chartDatapointLimit uint64 = 200 - -type ChartTimeDashboardLimits struct { - MinAllowedTs uint64 - LatestExportedTs uint64 - MaxAllowedInterval uint64 -} - -// helper function to retrieve allowed chart timestamp boundaries according to the users premium perks at the current point in time -func (h *HandlerService) getCurrentChartTimeLimitsForDashboard(ctx context.Context, dashboardId *types.VDBId, aggregation enums.ChartAggregation) (ChartTimeDashboardLimits, error) { - limits := ChartTimeDashboardLimits{} - var err error - premiumPerks, err := h.getDashboardPremiumPerks(ctx, *dashboardId) - if err != nil { - return limits, err - } - - maxAge := getMaxChartAge(aggregation, premiumPerks.ChartHistorySeconds) // can be max int for unlimited, always check for underflows - if maxAge == 0 { - return limits, newConflictErr("requested aggregation is not available for dashboard owner's premium subscription") - } - limits.LatestExportedTs, err = h.dai.GetLatestExportedChartTs(ctx, aggregation) - if err != nil { - return limits, err - } - limits.MinAllowedTs = limits.LatestExportedTs - min(maxAge, limits.LatestExportedTs) // min to prevent underflow - secondsPerEpoch := uint64(12 * 32) // TODO: fetch dashboards chain id and use correct value for network once available - limits.MaxAllowedInterval = chartDatapointLimit*uint64(aggregation.Duration(secondsPerEpoch).Seconds()) - 1 // -1 to make sure we don't go over the limit - - return limits, nil -} - func (v *validationError) checkPrimaryDashboardId(param string) types.VDBIdPrimary { return types.VDBIdPrimary(v.checkUint(param, "dashboard_id")) 
} -// getDashboardPremiumPerks gets the premium perks of the dashboard OWNER or if it's a guest dashboard, it returns free tier premium perks -func (h *HandlerService) getDashboardPremiumPerks(ctx context.Context, id types.VDBId) (*types.PremiumPerks, error) { - // for guest dashboards, return free tier perks - if id.Validators != nil { - perk, err := h.dai.GetFreeTierPerks(ctx) - if err != nil { - return nil, err - } - return perk, nil - } - // could be made into a single query if needed - dashboardUser, err := h.dai.GetValidatorDashboardUser(ctx, id.Id) - if err != nil { - return nil, err - } - userInfo, err := h.dai.GetUserInfo(ctx, dashboardUser.UserId) - if err != nil { - return nil, err - } - - return &userInfo.PremiumPerks, nil -} - // helper function to unify handling of block detail request validation func (h *HandlerService) validateBlockRequest(r *http.Request, paramName string) (uint64, uint64, error) { var v validationError @@ -454,9 +270,9 @@ func (h *HandlerService) validateBlockRequest(r *http.Request, paramName string) // possibly add other values like "genesis", "finalized", hardforks etc. 
later case "latest": if paramName == "block" { - value, err = h.dai.GetLatestBlock() + value, err = h.daService.GetLatestBlock() } else if paramName == "slot" { - value, err = h.dai.GetLatestSlot() + value, err = h.daService.GetLatestSlot() } if err != nil { return 0, 0, err @@ -484,7 +300,6 @@ func (v *validationError) checkExistingGroupId(param string) uint64 { return v.checkUint(param, "group_id") } -//nolint:unparam func splitParameters(params string, delim rune) []string { // This splits the string by delim and removes empty strings f := func(c rune) bool { @@ -531,6 +346,12 @@ func (v *validationError) checkUintMinMax(param string, min uint64, max uint64, return checkMinMax(v, v.checkUint(param, paramName), min, max, paramName) } +type Paging struct { + cursor string + limit uint64 + search string +} + func (v *validationError) checkPagingParams(q url.Values) Paging { paging := Paging{ cursor: q.Get("cursor"), @@ -560,16 +381,6 @@ func checkEnum[T enums.EnumFactory[T]](v *validationError, enumString string, na return enum } -// better func name would be -func checkValueInAllowed[T cmp.Ordered](v *validationError, value T, allowed []T, name string) { - for _, a := range allowed { - if cmp.Compare(value, a) == 0 { - return - } - } - v.add(name, "parameter is missing or invalid, please check the API documentation") -} - func (v *validationError) parseSortOrder(order string) bool { switch order { case "": @@ -589,7 +400,7 @@ func checkSort[T enums.EnumFactory[T]](v *validationError, sortString string) *t if sortString == "" { return &types.Sort[T]{Column: c, Desc: defaultDesc} } - sortSplit := strings.Split(sortString, ":") + sortSplit := splitParameters(sortString, ':') if len(sortSplit) > 2 { v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, expected format is '[:(asc|desc)]'", sortString)) return nil @@ -689,11 +500,7 @@ func (v *validationError) checkNetworkParameter(param string) uint64 { return 
v.checkNetwork(intOrString{strValue: ¶m}) } -//nolint:unused func (v *validationError) checkNetworksParameter(param string) []uint64 { - if param == "" { - v.add("networks", "list of networks must not be empty") - } var chainIds []uint64 for _, network := range splitParameters(param, ',') { chainIds = append(chainIds, v.checkNetworkParameter(network)) @@ -748,334 +555,3 @@ func (v *validationError) checkTimestamps(r *http.Request, chartLimits ChartTime return afterTs, beforeTs } } - -// getMaxChartAge returns the maximum age of a chart in seconds based on the given aggregation type and premium perks -func getMaxChartAge(aggregation enums.ChartAggregation, perkSeconds types.ChartHistorySeconds) uint64 { - aggregations := enums.ChartAggregations - switch aggregation { - case aggregations.Epoch: - return perkSeconds.Epoch - case aggregations.Hourly: - return perkSeconds.Hourly - case aggregations.Daily: - return perkSeconds.Daily - case aggregations.Weekly: - return perkSeconds.Weekly - default: - return 0 - } -} - -func isUserAdmin(user *types.UserInfo) bool { - if user == nil { - return false - } - return user.UserGroup == types.UserGroupAdmin -} - -// -------------------------------------- -// Response handling - -func writeResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) { - w.Header().Set("Content-Type", "application/json") - if response == nil { - w.WriteHeader(statusCode) - return - } - jsonData, err := json.Marshal(response) - if err != nil { - logApiError(r, fmt.Errorf("error encoding json data: %w", err), 0, - log.Fields{ - "data": fmt.Sprintf("%+v", response), - }) - w.WriteHeader(http.StatusInternalServerError) - response = types.ApiErrorResponse{ - Error: "error encoding json data", - } - if err = json.NewEncoder(w).Encode(response); err != nil { - // there seems to be an error with the lib - logApiError(r, fmt.Errorf("error encoding error response after failed encoding: %w", err), 0) - } - return - } - 
w.WriteHeader(statusCode) - if _, err = w.Write(jsonData); err != nil { - // already returned wrong status code to user, can't prevent that - logApiError(r, fmt.Errorf("error writing response data: %w", err), 0) - } -} - -func returnError(w http.ResponseWriter, r *http.Request, code int, err error) { - response := types.ApiErrorResponse{ - Error: err.Error(), - } - writeResponse(w, r, code, response) -} - -func returnOk(w http.ResponseWriter, r *http.Request, data interface{}) { - writeResponse(w, r, http.StatusOK, data) -} - -func returnCreated(w http.ResponseWriter, r *http.Request, data interface{}) { - writeResponse(w, r, http.StatusCreated, data) -} - -func returnNoContent(w http.ResponseWriter, r *http.Request) { - writeResponse(w, r, http.StatusNoContent, nil) -} - -// Errors - -func returnBadRequest(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusBadRequest, err) -} - -func returnUnauthorized(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusUnauthorized, err) -} - -func returnNotFound(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusNotFound, err) -} - -func returnConflict(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusConflict, err) -} - -func returnForbidden(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusForbidden, err) -} - -func returnTooManyRequests(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusTooManyRequests, err) -} - -func returnGone(w http.ResponseWriter, r *http.Request, err error) { - returnError(w, r, http.StatusGone, err) -} - -const maxBodySize = 10 * 1024 - -func logApiError(r *http.Request, err error, callerSkip int, additionalInfos ...log.Fields) { - body, _ := io.ReadAll(io.LimitReader(r.Body, maxBodySize)) - requestFields := log.Fields{ - "request_endpoint": r.Method + " " + r.URL.Path, - "request_query": r.URL.RawQuery, - 
"request_body": string(body), - } - log.Error(err, "error handling request", callerSkip+1, append(additionalInfos, requestFields)...) -} - -func handleErr(w http.ResponseWriter, r *http.Request, err error) { - _, isValidationError := err.(validationError) - switch { - case isValidationError, errors.Is(err, errBadRequest): - returnBadRequest(w, r, err) - case errors.Is(err, dataaccess.ErrNotFound): - returnNotFound(w, r, err) - case errors.Is(err, errUnauthorized): - returnUnauthorized(w, r, err) - case errors.Is(err, errForbidden): - returnForbidden(w, r, err) - case errors.Is(err, errConflict): - returnConflict(w, r, err) - case errors.Is(err, services.ErrWaiting): - returnError(w, r, http.StatusServiceUnavailable, err) - case errors.Is(err, errTooManyRequests): - returnTooManyRequests(w, r, err) - case errors.Is(err, errGone): - returnGone(w, r, err) - default: - logApiError(r, err, 1) - // TODO: don't return the error message to the user in production - returnError(w, r, http.StatusInternalServerError, err) - } -} - -// -------------------------------------- -// Error Helpers - -func errWithMsg(err error, format string, args ...interface{}) error { - return fmt.Errorf("%w: %s", err, fmt.Sprintf(format, args...)) -} - -//nolint:nolintlint -//nolint:unparam -func newBadRequestErr(format string, args ...interface{}) error { - return errWithMsg(errBadRequest, format, args...) -} - -//nolint:unparam -func newInternalServerErr(format string, args ...interface{}) error { - return errWithMsg(errInternalServer, format, args...) -} - -//nolint:unparam -func newUnauthorizedErr(format string, args ...interface{}) error { - return errWithMsg(errUnauthorized, format, args...) -} - -func newForbiddenErr(format string, args ...interface{}) error { - return errWithMsg(errForbidden, format, args...) -} - -//nolint:unparam -func newConflictErr(format string, args ...interface{}) error { - return errWithMsg(errConflict, format, args...) 
-} - -//nolint:nolintlint -//nolint:unparam -func newNotFoundErr(format string, args ...interface{}) error { - return errWithMsg(dataaccess.ErrNotFound, format, args...) -} - -func newTooManyRequestsErr(format string, args ...interface{}) error { - return errWithMsg(errTooManyRequests, format, args...) -} - -func newGoneErr(format string, args ...interface{}) error { - return errWithMsg(errGone, format, args...) -} - -// -------------------------------------- -// misc. helper functions - -// maps different types of validator dashboard summary validators to a common format -func mapVDBIndices(indices interface{}) ([]types.VDBSummaryValidatorsData, error) { - if indices == nil { - return nil, errors.New("no data found when mapping") - } - - switch v := indices.(type) { - case *types.VDBGeneralSummaryValidators: - // deposited, online, offline, slashing, slashed, exited, withdrawn, pending, exiting, withdrawing - return []types.VDBSummaryValidatorsData{ - mapUintSlice("deposited", v.Deposited), - mapUintSlice("online", v.Online), - mapUintSlice("offline", v.Offline), - mapUintSlice("slashing", v.Slashing), - mapUintSlice("slashed", v.Slashed), - mapUintSlice("exited", v.Exited), - mapUintSlice("withdrawn", v.Withdrawn), - mapIndexTimestampSlice("pending", v.Pending), - mapIndexTimestampSlice("exiting", v.Exiting), - mapIndexTimestampSlice("withdrawing", v.Withdrawing), - }, nil - - case *types.VDBSyncSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapUintSlice("sync_current", v.Current), - mapUintSlice("sync_upcoming", v.Upcoming), - mapSlice("sync_past", v.Past, - func(v types.VDBValidatorSyncPast) (uint64, []uint64) { return v.Index, []uint64{v.Count} }, - ), - }, nil - - case *types.VDBSlashingsSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapSlice("got_slashed", v.GotSlashed, - func(v types.VDBValidatorGotSlashed) (uint64, []uint64) { return v.Index, []uint64{v.SlashedBy} }, - ), - mapSlice("has_slashed", v.HasSlashed, - func(v 
types.VDBValidatorHasSlashed) (uint64, []uint64) { return v.Index, v.SlashedIndices }, - ), - }, nil - - case *types.VDBProposalSummaryValidators: - return []types.VDBSummaryValidatorsData{ - mapIndexBlocksSlice("proposal_proposed", v.Proposed), - mapIndexBlocksSlice("proposal_missed", v.Missed), - }, nil - - default: - return nil, fmt.Errorf("unsupported indices type") - } -} - -// maps different types of validator dashboard summary validators to a common format -func mapSlice[T any](category string, validators []T, getIndexAndDutyObjects func(validator T) (index uint64, dutyObjects []uint64)) types.VDBSummaryValidatorsData { - validatorsData := make([]types.VDBSummaryValidator, len(validators)) - for i, validator := range validators { - index, dutyObjects := getIndexAndDutyObjects(validator) - validatorsData[i] = types.VDBSummaryValidator{Index: index, DutyObjects: dutyObjects} - } - return types.VDBSummaryValidatorsData{ - Category: category, - Validators: validatorsData, - } -} -func mapUintSlice(category string, validators []uint64) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v uint64) (uint64, []uint64) { return v, nil }, - ) -} - -func mapIndexTimestampSlice(category string, validators []types.IndexTimestamp) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v types.IndexTimestamp) (uint64, []uint64) { return v.Index, []uint64{v.Timestamp} }, - ) -} - -func mapIndexBlocksSlice(category string, validators []types.IndexBlocks) types.VDBSummaryValidatorsData { - return mapSlice(category, validators, - func(v types.IndexBlocks) (uint64, []uint64) { return v.Index, v.Blocks }, - ) -} - -// -------------------------------------- -// intOrString is a custom type that can be unmarshalled from either an int or a string (strings will also be parsed to int if possible). -// if unmarshaling throws no errors one of the two fields will be set, the other will be nil. 
-type intOrString struct { - intValue *uint64 - strValue *string -} - -func (v *intOrString) UnmarshalJSON(data []byte) error { - // Attempt to unmarshal as uint64 first - var intValue uint64 - if err := json.Unmarshal(data, &intValue); err == nil { - v.intValue = &intValue - return nil - } - - // If unmarshalling as uint64 fails, try to unmarshal as string - var strValue string - if err := json.Unmarshal(data, &strValue); err == nil { - if parsedInt, err := strconv.ParseUint(strValue, 10, 64); err == nil { - v.intValue = &parsedInt - } else { - v.strValue = &strValue - } - return nil - } - - // If both unmarshalling attempts fail, return an error - return fmt.Errorf("failed to unmarshal intOrString from json: %s", string(data)) -} - -func (v intOrString) String() string { - if v.intValue != nil { - return strconv.FormatUint(*v.intValue, 10) - } - if v.strValue != nil { - return *v.strValue - } - return "" -} - -func (intOrString) JSONSchema() *jsonschema.Schema { - return &jsonschema.Schema{ - OneOf: []*jsonschema.Schema{ - {Type: "string"}, {Type: "integer"}, - }, - } -} - -func isMockEnabled(r *http.Request) bool { - isMockEnabled, ok := r.Context().Value(ctxIsMockEnabledKey).(bool) - if !ok { - return false - } - return isMockEnabled -} diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index ef1310237..3093352f9 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -14,7 +14,7 @@ import ( // Premium Plans func (h *HandlerService) InternalGetProductSummary(w http.ResponseWriter, r *http.Request) { - data, err := h.dai.GetProductSummary(r.Context()) + data, err := h.daService.GetProductSummary(r.Context()) if err != nil { handleErr(w, r, err) return @@ -29,7 +29,7 @@ func (h *HandlerService) InternalGetProductSummary(w http.ResponseWriter, r *htt // API Ratelimit Weights func (h *HandlerService) InternalGetRatelimitWeights(w http.ResponseWriter, r *http.Request) { - data, err := 
h.dai.GetApiWeights(r.Context()) + data, err := h.daService.GetApiWeights(r.Context()) if err != nil { handleErr(w, r, err) return @@ -44,19 +44,19 @@ func (h *HandlerService) InternalGetRatelimitWeights(w http.ResponseWriter, r *h // Latest State func (h *HandlerService) InternalGetLatestState(w http.ResponseWriter, r *http.Request) { - latestSlot, err := h.dai.GetLatestSlot() + latestSlot, err := h.daService.GetLatestSlot() if err != nil { handleErr(w, r, err) return } - finalizedEpoch, err := h.dai.GetLatestFinalizedEpoch() + finalizedEpoch, err := h.daService.GetLatestFinalizedEpoch() if err != nil { handleErr(w, r, err) return } - exchangeRates, err := h.dai.GetLatestExchangeRates() + exchangeRates, err := h.daService.GetLatestExchangeRates() if err != nil { handleErr(w, r, err) return @@ -74,7 +74,7 @@ func (h *HandlerService) InternalGetLatestState(w http.ResponseWriter, r *http.R } func (h *HandlerService) InternalGetRocketPool(w http.ResponseWriter, r *http.Request) { - data, err := h.dai.GetRocketPoolOverview(r.Context()) + data, err := h.daService.GetRocketPoolOverview(r.Context()) if err != nil { handleErr(w, r, err) return @@ -126,7 +126,7 @@ func (h *HandlerService) InternalPostAdConfigurations(w http.ResponseWriter, r * return } - err = h.dai.CreateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) + err = h.daService.CreateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) if err != nil { handleErr(w, r, err) return @@ -156,7 +156,7 @@ func (h *HandlerService) InternalGetAdConfigurations(w http.ResponseWriter, r *h return } - data, err := h.dai.GetAdConfigurations(r.Context(), keys) + data, err := h.daService.GetAdConfigurations(r.Context(), keys) if err != nil { handleErr(w, r, err) return @@ -202,7 +202,7 @@ func (h *HandlerService) 
InternalPutAdConfiguration(w http.ResponseWriter, r *ht return } - err = h.dai.UpdateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) + err = h.daService.UpdateAdConfiguration(r.Context(), key, req.JQuerySelector, insertMode, req.RefreshInterval, req.ForAllUsers, req.BannerId, req.HtmlContent, req.Enabled) if err != nil { handleErr(w, r, err) return @@ -232,7 +232,7 @@ func (h *HandlerService) InternalDeleteAdConfiguration(w http.ResponseWriter, r return } - err = h.dai.RemoveAdConfiguration(r.Context(), key) + err = h.daService.RemoveAdConfiguration(r.Context(), key) if err != nil { handleErr(w, r, err) return @@ -251,7 +251,7 @@ func (h *HandlerService) InternalGetUserInfo(w http.ResponseWriter, r *http.Requ handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), user.Id) + userInfo, err := h.daService.GetUserInfo(r.Context(), user.Id) if err != nil { handleErr(w, r, err) return @@ -367,6 +367,34 @@ func (h *HandlerService) InternalGetValidatorDashboardValidators(w http.Response h.PublicGetValidatorDashboardValidators(w, r) } +func (h *HandlerService) InternalGetValidatorDashboardMobileValidators(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + + period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") + sort := checkSort[enums.VDBMobileValidatorsColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.daService.GetValidatorDashboardMobileValidators(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := 
types.InternalGetValidatorDashboardMobileValidatorsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + func (h *HandlerService) InternalDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { h.PublicDeleteValidatorDashboardValidators(w, r) } @@ -467,10 +495,6 @@ func (h *HandlerService) InternalGetValidatorDashboardTotalRocketPool(w http.Res h.PublicGetValidatorDashboardTotalRocketPool(w, r) } -func (h *HandlerService) InternalGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { - h.PublicGetValidatorDashboardNodeRocketPool(w, r) -} - func (h *HandlerService) InternalGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) { h.PublicGetValidatorDashboardRocketPoolMinipools(w, r) } @@ -488,7 +512,7 @@ func (h *HandlerService) InternalGetValidatorDashboardMobileWidget(w http.Respon handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -497,7 +521,7 @@ func (h *HandlerService) InternalGetValidatorDashboardMobileWidget(w http.Respon returnForbidden(w, r, errors.New("user does not have access to mobile app widget")) return } - data, err := h.dai.GetValidatorDashboardMobileWidget(r.Context(), dashboardId) + data, err := h.daService.GetValidatorDashboardMobileWidget(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -521,7 +545,7 @@ func (h *HandlerService) InternalGetMobileLatestBundle(w http.ResponseWriter, r handleErr(w, r, v) return } - stats, err := h.dai.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) + stats, err := h.daService.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) if err != nil { handleErr(w, r, err) return @@ -546,7 +570,7 @@ func (h *HandlerService) InternalPostMobileBundleDeliveries(w http.ResponseWrite handleErr(w, r, v) return } - err := 
h.dai.IncrementBundleDeliveryCount(r.Context(), bundleVersion) + err := h.daService.IncrementBundleDeliveryCount(r.Context(), bundleVersion) if err != nil { handleErr(w, r, err) return @@ -647,7 +671,7 @@ func (h *HandlerService) InternalGetBlock(w http.ResponseWriter, r *http.Request return } - data, err := h.dai.GetBlock(r.Context(), chainId, block) + data, err := h.daService.GetBlock(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -666,7 +690,7 @@ func (h *HandlerService) InternalGetBlockOverview(w http.ResponseWriter, r *http return } - data, err := h.dai.GetBlockOverview(r.Context(), chainId, block) + data, err := h.daService.GetBlockOverview(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -684,7 +708,7 @@ func (h *HandlerService) InternalGetBlockTransactions(w http.ResponseWriter, r * return } - data, err := h.dai.GetBlockTransactions(r.Context(), chainId, block) + data, err := h.daService.GetBlockTransactions(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -703,7 +727,7 @@ func (h *HandlerService) InternalGetBlockVotes(w http.ResponseWriter, r *http.Re return } - data, err := h.dai.GetBlockVotes(r.Context(), chainId, block) + data, err := h.daService.GetBlockVotes(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -722,7 +746,7 @@ func (h *HandlerService) InternalGetBlockAttestations(w http.ResponseWriter, r * return } - data, err := h.dai.GetBlockAttestations(r.Context(), chainId, block) + data, err := h.daService.GetBlockAttestations(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -741,7 +765,7 @@ func (h *HandlerService) InternalGetBlockWithdrawals(w http.ResponseWriter, r *h return } - data, err := h.dai.GetBlockWithdrawals(r.Context(), chainId, block) + data, err := h.daService.GetBlockWithdrawals(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -760,7 +784,7 @@ func (h *HandlerService) 
InternalGetBlockBlsChanges(w http.ResponseWriter, r *ht return } - data, err := h.dai.GetBlockBlsChanges(r.Context(), chainId, block) + data, err := h.daService.GetBlockBlsChanges(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -779,7 +803,7 @@ func (h *HandlerService) InternalGetBlockVoluntaryExits(w http.ResponseWriter, r return } - data, err := h.dai.GetBlockVoluntaryExits(r.Context(), chainId, block) + data, err := h.daService.GetBlockVoluntaryExits(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -798,7 +822,7 @@ func (h *HandlerService) InternalGetBlockBlobs(w http.ResponseWriter, r *http.Re return } - data, err := h.dai.GetBlockBlobs(r.Context(), chainId, block) + data, err := h.daService.GetBlockBlobs(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -820,7 +844,7 @@ func (h *HandlerService) InternalGetSlot(w http.ResponseWriter, r *http.Request) return } - data, err := h.dai.GetSlot(r.Context(), chainId, block) + data, err := h.daService.GetSlot(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -839,7 +863,7 @@ func (h *HandlerService) InternalGetSlotOverview(w http.ResponseWriter, r *http. 
return } - data, err := h.dai.GetSlotOverview(r.Context(), chainId, block) + data, err := h.daService.GetSlotOverview(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -857,7 +881,7 @@ func (h *HandlerService) InternalGetSlotTransactions(w http.ResponseWriter, r *h return } - data, err := h.dai.GetSlotTransactions(r.Context(), chainId, block) + data, err := h.daService.GetSlotTransactions(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -876,7 +900,7 @@ func (h *HandlerService) InternalGetSlotVotes(w http.ResponseWriter, r *http.Req return } - data, err := h.dai.GetSlotVotes(r.Context(), chainId, block) + data, err := h.daService.GetSlotVotes(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -895,7 +919,7 @@ func (h *HandlerService) InternalGetSlotAttestations(w http.ResponseWriter, r *h return } - data, err := h.dai.GetSlotAttestations(r.Context(), chainId, block) + data, err := h.daService.GetSlotAttestations(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -914,7 +938,7 @@ func (h *HandlerService) InternalGetSlotWithdrawals(w http.ResponseWriter, r *ht return } - data, err := h.dai.GetSlotWithdrawals(r.Context(), chainId, block) + data, err := h.daService.GetSlotWithdrawals(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -933,7 +957,7 @@ func (h *HandlerService) InternalGetSlotBlsChanges(w http.ResponseWriter, r *htt return } - data, err := h.dai.GetSlotBlsChanges(r.Context(), chainId, block) + data, err := h.daService.GetSlotBlsChanges(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -952,7 +976,7 @@ func (h *HandlerService) InternalGetSlotVoluntaryExits(w http.ResponseWriter, r return } - data, err := h.dai.GetSlotVoluntaryExits(r.Context(), chainId, block) + data, err := h.daService.GetSlotVoluntaryExits(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return @@ -971,7 +995,7 @@ func 
(h *HandlerService) InternalGetSlotBlobs(w http.ResponseWriter, r *http.Req return } - data, err := h.dai.GetSlotBlobs(r.Context(), chainId, block) + data, err := h.daService.GetSlotBlobs(r.Context(), chainId, block) if err != nil { handleErr(w, r, err) return diff --git a/backend/pkg/api/handlers/machine_metrics.go b/backend/pkg/api/handlers/machine_metrics.go index 5a84ce435..9bef843d5 100644 --- a/backend/pkg/api/handlers/machine_metrics.go +++ b/backend/pkg/api/handlers/machine_metrics.go @@ -1,9 +1,21 @@ package handlers import ( + "context" + "encoding/json" + "fmt" + "io" "net/http" + "strings" "github.com/gobitfly/beaconchain/pkg/api/types" + + "github.com/gobitfly/beaconchain/pkg/commons/db" + commontypes "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + + "google.golang.org/protobuf/proto" ) func (h *HandlerService) InternalGetUserMachineMetrics(w http.ResponseWriter, r *http.Request) { @@ -17,6 +29,13 @@ func (h *HandlerService) PublicGetUserMachineMetrics(w http.ResponseWriter, r *h handleErr(w, r, err) return } + + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() offset := v.checkUint(q.Get("offset"), "offset") limit := uint64(180) @@ -24,7 +43,15 @@ func (h *HandlerService) PublicGetUserMachineMetrics(w http.ResponseWriter, r *h limit = v.checkUint(limitParam, "limit") } - data, err := h.dai.GetUserMachineMetrics(r.Context(), userId, limit, offset) + // validate limit and offset according to user's premium perks + maxDataPoints := userInfo.PremiumPerks.MachineMonitoringHistorySeconds / 60 // one entry per minute + timeframe := offset + limit + if timeframe > maxDataPoints { + limit = maxDataPoints + offset = 0 + } + + data, err := h.daService.GetUserMachineMetrics(r.Context(), userId, int(limit), int(offset)) if err != nil { handleErr(w, r, err) return @@ -35,3 +62,158 @@ func (h *HandlerService) 
PublicGetUserMachineMetrics(w http.ResponseWriter, r *h returnOk(w, r, response) } + +func (h *HandlerService) LegacyPostUserMachineMetrics(w http.ResponseWriter, r *http.Request) { + q := r.URL.Query() + apiKey := q.Get("apikey") + machine := q.Get("machine") + + if apiKey == "" { + apiKey = r.Header.Get("apikey") + } + + if !h.isPostMachineMetricsEnabled { + returnError(w, r, http.StatusServiceUnavailable, fmt.Errorf("machine metrics pushing is temporarily disabled")) + return + } + + userID, err := h.daService.GetUserIdByApiKey(r.Context(), apiKey) + if err != nil { + returnBadRequest(w, r, fmt.Errorf("no user found with api key")) + return + } + + userInfo, err := h.daService.GetUserInfo(r.Context(), userID) + if err != nil { + handleErr(w, r, err) + return + } + + if contentType := r.Header.Get("Content-Type"); !reJsonContentType.MatchString(contentType) { + returnBadRequest(w, r, fmt.Errorf("invalid content type, expected application/json")) + return + } + + body, err := io.ReadAll(r.Body) + if err != nil { + returnBadRequest(w, r, fmt.Errorf("could not read request body")) + return + } + + var jsonObjects []map[string]interface{} + err = json.Unmarshal(body, &jsonObjects) + if err != nil { + var jsonObject map[string]interface{} + err = json.Unmarshal(body, &jsonObject) + if err != nil { + returnBadRequest(w, r, errors.Wrap(err, "Invalid JSON format in request body")) + return + } + jsonObjects = []map[string]interface{}{jsonObject} + } + + if len(jsonObjects) >= 10 { + returnBadRequest(w, r, fmt.Errorf("Max number of stat entries are 10")) + return + } + + var rateLimitErrs = 0 + for i := 0; i < len(jsonObjects); i++ { + err := h.internal_processMachine(r.Context(), machine, &jsonObjects[i], userInfo) + if err != nil { + if strings.HasPrefix(err.Error(), "rate limit") { + rateLimitErrs++ + continue + } + handleErr(w, r, err) + return + } + } + + if rateLimitErrs >= len(jsonObjects) { + returnTooManyRequests(w, r, fmt.Errorf("too many metric requests, max 
allowed is 1 per user per machine per process")) + return + } + + returnOk(w, r, nil) +} + +func (h *HandlerService) internal_processMachine(context context.Context, machine string, obj *map[string]interface{}, userInfo *types.UserInfo) error { + var parsedMeta *commontypes.StatsMeta + err := mapstructure.Decode(obj, &parsedMeta) + if err != nil { + return fmt.Errorf("%w: %w", errBadRequest, err) + } + + parsedMeta.Machine = machine + + if parsedMeta.Version > 2 || parsedMeta.Version <= 0 { + return newBadRequestErr("unsupported data format version") + } + + if parsedMeta.Process != "validator" && parsedMeta.Process != "beaconnode" && parsedMeta.Process != "slasher" && parsedMeta.Process != "system" { + return newBadRequestErr("unknown process") + } + + maxNodes := userInfo.PremiumPerks.MonitorMachines + + count, err := db.BigtableClient.GetMachineMetricsMachineCount(commontypes.UserId(userInfo.Id)) + if err != nil { + return errors.Wrap(err, "could not get machine count") + } + + if count > maxNodes { + return newForbiddenErr("user has reached max machine count") + } + + // protobuf encode + var data []byte + if parsedMeta.Process == "system" { + var parsedResponse *commontypes.MachineMetricSystem + err = DecodeMapStructure(obj, &parsedResponse) + if err != nil { + return fmt.Errorf("%w: %w could not parse stats (system stats)", errBadRequest, err) + } + data, err = proto.Marshal(parsedResponse) + if err != nil { + return errors.Wrap(err, "could not parse stats (system stats)") + } + } else if parsedMeta.Process == "validator" { + var parsedResponse *commontypes.MachineMetricValidator + err = DecodeMapStructure(obj, &parsedResponse) + if err != nil { + return fmt.Errorf("%w: %w could not parse stats (validator stats)", errBadRequest, err) + } + data, err = proto.Marshal(parsedResponse) + if err != nil { + return errors.Wrap(err, "could not parse stats (validator stats)") + } + } else if parsedMeta.Process == "beaconnode" { + var parsedResponse 
*commontypes.MachineMetricNode + err = DecodeMapStructure(obj, &parsedResponse) + if err != nil { + return fmt.Errorf("%w: %w could not parse stats (beaconnode stats)", errBadRequest, err) + } + data, err = proto.Marshal(parsedResponse) + if err != nil { + return errors.Wrap(err, "could not parse stats (beaconnode stats)") + } + } + + return h.daService.PostUserMachineMetrics(context, userInfo.Id, machine, parsedMeta.Process, data) +} + +func DecodeMapStructure(input interface{}, output interface{}) error { + config := &mapstructure.DecoderConfig{ + Metadata: nil, + Result: output, + TagName: "json", + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} diff --git a/backend/pkg/api/handlers/middlewares.go b/backend/pkg/api/handlers/middlewares.go new file mode 100644 index 000000000..54238cc59 --- /dev/null +++ b/backend/pkg/api/handlers/middlewares.go @@ -0,0 +1,200 @@ +package handlers + +import ( + "context" + "errors" + "net/http" + "slices" + "strconv" + + "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gorilla/mux" +) + +// Middlewares + +// middleware that stores user id in context, using the provided function +func StoreUserIdMiddleware(next http.Handler, userIdFunc func(r *http.Request) (uint64, error)) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + userId, err := userIdFunc(r) + if err != nil { + if errors.Is(err, errUnauthorized) { + // if next handler requires authentication, it should return 'unauthorized' itself + next.ServeHTTP(w, r) + } else { + handleErr(w, r, err) + } + return + } + + // store user id in context + ctx := r.Context() + ctx = context.WithValue(ctx, ctxUserIdKey, userId) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) +} + +// middleware that stores user id in context, using the session to get the user id +func (h *HandlerService) StoreUserIdBySessionMiddleware(next http.Handler) http.Handler { + 
return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { + return h.GetUserIdBySession(r) + }) +} + +// middleware that stores user id in context, using the api key to get the user id +func (h *HandlerService) StoreUserIdByApiKeyMiddleware(next http.Handler) http.Handler { + return StoreUserIdMiddleware(next, func(r *http.Request) (uint64, error) { + return h.GetUserIdByApiKey(r) + }) +} + +// middleware that checks if user has access to dashboard when a primary id is used +func (h *HandlerService) VDBAuthMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // if mock data is used, no need to check access + if isMockEnabled, ok := r.Context().Value(ctxIsMockedKey).(bool); ok && isMockEnabled { + next.ServeHTTP(w, r) + return + } + var err error + dashboardId, err := strconv.ParseUint(mux.Vars(r)["dashboard_id"], 10, 64) + if err != nil { + // if primary id is not used, no need to check access + next.ServeHTTP(w, r) + return + } + // primary id is used -> user needs to have access to dashboard + + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + // store user id in context + ctx := r.Context() + ctx = context.WithValue(ctx, ctxUserIdKey, userId) + r = r.WithContext(ctx) + + dashboardUser, err := h.daService.GetValidatorDashboardUser(r.Context(), types.VDBIdPrimary(dashboardId)) + if err != nil { + handleErr(w, r, err) + return + } + + if dashboardUser.UserId != userId { + // user does not have access to dashboard + // the proper error would be 403 Forbidden, but we don't want to leak information so we return 404 Not Found + handleErr(w, r, newNotFoundErr("dashboard with id %v not found", dashboardId)) + return + } + + next.ServeHTTP(w, r) + }) +} + +// Common middleware logic for checking user premium perks +func (h *HandlerService) PremiumPerkCheckMiddleware(next http.Handler, hasRequiredPerk func(premiumPerks types.PremiumPerks) bool) 
http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // get user id from context + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + // get user info + userInfo, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + + // check if user has the required premium perk + if !hasRequiredPerk(userInfo.PremiumPerks) { + handleErr(w, r, newForbiddenErr("users premium perks do not allow usage of this endpoint")) + return + } + + next.ServeHTTP(w, r) + }) +} + +// Middleware for managing dashboards via API +func (h *HandlerService) ManageDashboardsViaApiCheckMiddleware(next http.Handler) http.Handler { + return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { + return premiumPerks.ManageDashboardViaApi + }) +} + +// Middleware for managing notifications via API +func (h *HandlerService) ManageNotificationsViaApiCheckMiddleware(next http.Handler) http.Handler { + return h.PremiumPerkCheckMiddleware(next, func(premiumPerks types.PremiumPerks) bool { + return premiumPerks.ConfigureNotificationsViaApi + }) +} + +// middleware check to return if specified dashboard is not archived (and accessible) +func (h *HandlerService) VDBArchivedCheckMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if isMockEnabled, ok := r.Context().Value(ctxIsMockedKey).(bool); ok && isMockEnabled { + next.ServeHTTP(w, r) + return + } + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + if len(dashboardId.Validators) > 0 { + next.ServeHTTP(w, r) + return + } + dashboard, err := h.daService.GetValidatorDashboardInfo(r.Context(), dashboardId.Id) + if err != nil { + handleErr(w, r, err) + return + } + if dashboard.IsArchived { + handleErr(w, r, newForbiddenErr("dashboard with id %v is 
archived", dashboardId)) + return + } + next.ServeHTTP(w, r) + }) +} + +// middleware that checks for `is_mocked` query param and stores it in the request context. +// should bypass auth checks if the flag is set and cause handlers to return mocked data. +// only allowed for users in the admin or dev group. +// note that mocked data is only returned by handlers that check for it. +func (h *HandlerService) StoreIsMockedFlagMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + isMocked, _ := strconv.ParseBool(r.URL.Query().Get("is_mocked")) + if !isMocked { + next.ServeHTTP(w, r) + return + } + // fetch user group + userId, err := h.GetUserIdBySession(r) + if err != nil { + handleErr(w, r, err) + return + } + userCredentials, err := h.daService.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + allowedGroups := []string{types.UserGroupAdmin, types.UserGroupDev} + if !slices.Contains(allowedGroups, userCredentials.UserGroup) { + handleErr(w, r, newForbiddenErr("user is not allowed to use mock data")) + return + } + // store isMocked flag in context + ctx := r.Context() + ctx = context.WithValue(ctx, ctxIsMockedKey, true) + r = r.WithContext(ctx) + next.ServeHTTP(w, r) + }) +} diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index fc2ed3d03..9c7f5d5e2 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -47,7 +47,7 @@ func (h *HandlerService) PublicGetHealthz(w http.ResponseWriter, r *http.Request } ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) defer cancel() - data := h.dai.GetHealthz(ctx, showAll) + data := h.getDataAccessor(r).GetHealthz(ctx, showAll) responseCode := http.StatusOK if data.TotalOkPercentage != 1 { @@ -74,7 +74,7 @@ func (h *HandlerService) PublicGetUserDashboards(w http.ResponseWriter, r *http. 
handleErr(w, r, err) return } - data, err := h.dai.GetUserDashboards(r.Context(), userId) + data, err := h.getDataAccessor(r).GetUserDashboards(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -177,12 +177,12 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, true) + dashboardCount, err := h.getDataAccessor(r).GetUserValidatorDashboardCount(r.Context(), userId, true) if err != nil { handleErr(w, r, err) return @@ -192,7 +192,7 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r return } - data, err := h.dai.CreateValidatorDashboard(r.Context(), userId, name, chainId) + data, err := h.getDataAccessor(r).CreateValidatorDashboard(r.Context(), userId, name, chainId) if err != nil { handleErr(w, r, err) return @@ -232,10 +232,10 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h // set name depending on dashboard id var name string if reInteger.MatchString(dashboardIdParam) { - name, err = h.dai.GetValidatorDashboardName(r.Context(), dashboardId.Id) + name, err = h.getDataAccessor(r).GetValidatorDashboardName(r.Context(), dashboardId.Id) } else if reValidatorDashboardPublicId.MatchString(dashboardIdParam) { var publicIdInfo *types.VDBPublicId - publicIdInfo, err = h.dai.GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) + publicIdInfo, err = h.getDataAccessor(r).GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) name = publicIdInfo.Name } if err != nil { @@ -249,7 +249,7 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h handleErr(w, r, err) return } - data, err := 
h.dai.GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) if err != nil { handleErr(w, r, err) return @@ -281,7 +281,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r handleErr(w, r, v) return } - err := h.dai.RemoveValidatorDashboard(r.Context(), dashboardId) + err := h.getDataAccessor(r).RemoveValidatorDashboard(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -317,7 +317,7 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, handleErr(w, r, v) return } - data, err := h.dai.UpdateValidatorDashboardName(r.Context(), dashboardId, name) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardName(r.Context(), dashboardId, name) if err != nil { handleErr(w, r, err) return @@ -364,12 +364,12 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(ctx, userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(ctx, userId) if err != nil { handleErr(w, r, err) return } - groupCount, err := h.dai.GetValidatorDashboardGroupCount(ctx, dashboardId) + groupCount, err := h.getDataAccessor(r).GetValidatorDashboardGroupCount(ctx, dashboardId) if err != nil { handleErr(w, r, err) return @@ -379,7 +379,7 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite return } - data, err := h.dai.CreateValidatorDashboardGroup(ctx, dashboardId, name) + data, err := h.getDataAccessor(r).CreateValidatorDashboardGroup(ctx, dashboardId, name) if err != nil { handleErr(w, r, err) return @@ -423,7 +423,7 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter handleErr(w, r, v) return } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + groupExists, err := 
h.getDataAccessor(r).GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -432,7 +432,7 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter returnNotFound(w, r, errors.New("group not found")) return } - data, err := h.dai.UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) if err != nil { handleErr(w, r, err) return @@ -470,7 +470,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnBadRequest(w, r, errors.New("cannot delete default group")) return } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -479,7 +479,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnNotFound(w, r, errors.New("group not found")) return } - err = h.dai.RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) + err = h.getDataAccessor(r).RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -544,7 +544,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW } ctx := r.Context() - groupExists, err := h.dai.GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) + groupExists, err := h.getDataAccessor(r).GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) if err != nil { handleErr(w, r, err) return @@ -558,7 +558,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(ctx, userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(ctx, userId) if err != nil { handleErr(w, r, err) return @@ 
-568,7 +568,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW return } dashboardLimit := userInfo.PremiumPerks.ValidatorsPerDashboard - existingValidatorCount, err := h.dai.GetValidatorDashboardValidatorsCount(ctx, dashboardId) + existingValidatorCount, err := h.getDataAccessor(r).GetValidatorDashboardValidatorsCount(ctx, dashboardId) if err != nil { handleErr(w, r, err) return @@ -589,7 +589,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - validators, err := h.dai.GetValidatorsFromSlices(indices, pubkeys) + validators, err := h.getDataAccessor(r).GetValidatorsFromSlices(indices, pubkeys) if err != nil { handleErr(w, r, err) return @@ -597,7 +597,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW if len(validators) > int(limit) { validators = validators[:limit] } - data, dataErr = h.dai.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) case req.DepositAddress != "": depositAddress := v.checkRegex(reEthereumAddress, req.DepositAddress, "deposit_address") @@ -605,7 +605,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) case req.WithdrawalAddress != "": withdrawalAddress := v.checkRegex(reWithdrawalCredential, req.WithdrawalAddress, "withdrawal_address") @@ -613,7 +613,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, 
withdrawalAddress, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, withdrawalAddress, limit) case req.Graffiti != "": graffiti := v.checkRegex(reGraffiti, req.Graffiti, "graffiti") @@ -621,7 +621,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, v) return } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) + data, dataErr = h.getDataAccessor(r).AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) } if dataErr != nil { @@ -663,7 +663,7 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr handleErr(w, r, v) return } - data, paging, err := h.dai.GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -703,12 +703,12 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons handleErr(w, r, v) return } - validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) + validators, err := h.getDataAccessor(r).GetValidatorsFromSlices(indices, publicKeys) if err != nil { handleErr(w, r, err) return } - err = h.dai.RemoveValidatorDashboardValidators(r.Context(), dashboardId, validators) + err = h.getDataAccessor(r).RemoveValidatorDashboardValidators(r.Context(), dashboardId, validators) if err != nil { handleErr(w, r, err) return @@ -749,7 +749,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr handleErr(w, r, v) return } - publicIdCount, err := h.dai.GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) + publicIdCount, err := 
h.getDataAccessor(r).GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -759,7 +759,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr return } - data, err := h.dai.CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) + data, err := h.getDataAccessor(r).CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) if err != nil { handleErr(w, r, err) return @@ -805,7 +805,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit handleErr(w, r, v) return } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + fetchedId, err := h.getDataAccessor(r).GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -815,7 +815,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit return } - data, err := h.dai.UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) if err != nil { handleErr(w, r, err) return @@ -847,7 +847,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW handleErr(w, r, v) return } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + fetchedId, err := h.getDataAccessor(r).GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, r, err) return @@ -857,7 +857,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW return } - err = h.dai.RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) + err = h.getDataAccessor(r).RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) if err != nil { handleErr(w, 
r, err) return @@ -896,7 +896,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri } // check conditions for changing archival status - dashboardInfo, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId) + dashboardInfo, err := h.getDataAccessor(r).GetValidatorDashboardInfo(r.Context(), dashboardId) if err != nil { handleErr(w, r, err) return @@ -914,13 +914,13 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri handleErr(w, r, err) return } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) + dashboardCount, err := h.getDataAccessor(r).GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) if err != nil { handleErr(w, r, err) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -950,7 +950,7 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri archivedReason = &enums.VDBArchivedReasons.User } - data, err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) + data, err := h.getDataAccessor(r).UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) if err != nil { handleErr(w, r, err) return @@ -984,7 +984,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWrite handleErr(w, r, v) return } - data, err := h.dai.GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) + data, err := h.getDataAccessor(r).GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) if err != nil { handleErr(w, r, err) return @@ -996,8 +996,6 @@ func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWrite returnOk(w, r, response) } -var summaryAllowedPeriods = []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, 
enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - // PublicGetValidatorDashboardSummary godoc // // @Description Get summary information for a specified dashboard @@ -1026,14 +1024,12 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite protocolModes := v.checkProtocolModes(q.Get("modes")) period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - checkValueInAllowed(&v, period, summaryAllowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return } - data, paging, err := h.dai.GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1073,14 +1069,12 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response } groupId := v.checkGroupId(vars["group_id"], forbidEmpty) period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - checkValueInAllowed(&v, period, summaryAllowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return } - data, err := h.dai.GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1133,7 +1127,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response return } - data, err := h.dai.GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) + data, err := 
h.getDataAccessor(r).GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) if err != nil { handleErr(w, r, err) return @@ -1167,9 +1161,6 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res q := r.URL.Query() duty := checkEnum[enums.ValidatorDuty](&v, q.Get("duty"), "duty") period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - checkValueInAllowed(&v, period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -1180,13 +1171,13 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res duties := enums.ValidatorDuties switch duty { case duties.None: - indices, err = h.dai.GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) case duties.Sync: - indices, err = h.dai.GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) case duties.Slashed: - indices, err = h.dai.GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) case duties.Proposal: - indices, err = h.dai.GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) + indices, err = h.getDataAccessor(r).GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) } if err != nil { handleErr(w, r, err) @@ -1236,7 
+1227,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite return } - data, paging, err := h.dai.GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1277,7 +1268,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1313,7 +1304,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response return } - data, err := h.dai.GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1359,7 +1350,7 @@ func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter return } - data, paging, err := h.dai.GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1401,7 +1392,7 @@ func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter return } - data, paging, err := h.dai.GetValidatorDashboardBlocks(r.Context(), *dashboardId, 
pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1451,7 +1442,7 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite return } - data, err := h.dai.GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) + data, err := h.getDataAccessor(r).GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) if err != nil { handleErr(w, r, err) return @@ -1501,7 +1492,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.Response return } - data, err := h.dai.GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) + data, err := h.getDataAccessor(r).GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) if err != nil { handleErr(w, r, err) return @@ -1536,7 +1527,7 @@ func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1572,7 +1563,7 @@ func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err 
!= nil { handleErr(w, r, err) return @@ -1601,7 +1592,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits( handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) if err != nil { handleErr(w, r, err) return @@ -1629,7 +1620,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits( handleErr(w, r, err) return } - data, err := h.dai.GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) if err != nil { handleErr(w, r, err) return @@ -1671,7 +1662,7 @@ func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseW return } - data, paging, err := h.dai.GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1708,7 +1699,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.Resp return } - data, err := h.dai.GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) if err != nil { handleErr(w, r, err) return @@ -1748,7 +1739,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWr return } - data, paging, err := h.dai.GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := 
h.getDataAccessor(r).GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1783,7 +1774,7 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.Respo return } - data, err := h.dai.GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) + data, err := h.getDataAccessor(r).GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) if err != nil { handleErr(w, r, err) return @@ -1794,42 +1785,6 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.Respo returnOk(w, r, response) } -// PublicGetValidatorDashboardNodeRocketPool godoc -// -// @Description Get details for a specific Rocket Pool node associated with a specified dashboard. -// @Tags Validator Dashboard -// @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param node_address path string true "The address of the node." -// @Success 200 {object} types.GetValidatorDashboardNodeRocketPoolResponse -// @Failure 400 {object} types.ApiErrorResponse -// @Router /validator-dashboards/{dashboard_id}/rocket-pool/{node_address} [get] -func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - // support ENS names ? 
- nodeAddress := v.checkAddress(vars["node_address"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := h.dai.GetValidatorDashboardNodeRocketPool(r.Context(), *dashboardId, nodeAddress) - if err != nil { - handleErr(w, r, err) - return - } - response := types.GetValidatorDashboardNodeRocketPoolResponse{ - Data: *data, - } - returnOk(w, r, response) -} - // PublicGetValidatorDashboardRocketPoolMinipools godoc // // @Description Get minipools information for a specified Rocket Pool node associated with a specified dashboard. @@ -1862,7 +1817,7 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.R return } - data, paging, err := h.dai.GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1892,7 +1847,7 @@ func (h *HandlerService) PublicGetUserNotifications(w http.ResponseWriter, r *ht handleErr(w, r, err) return } - data, err := h.dai.GetNotificationOverview(r.Context(), userId) + data, err := h.getDataAccessor(r).GetNotificationOverview(r.Context(), userId) if err != nil { handleErr(w, r, err) return @@ -1927,20 +1882,13 @@ func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWrit q := r.URL.Query() pagingParams := v.checkPagingParams(q) sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort")) - chainId := v.checkNetworkParameter(q.Get("network")) - chainIds := []uint64{chainId} - // TODO replace with "networks" once multiple networks are supported - //chainIds := v.checkNetworksParameter(q.Get("networks")) + chainIds := v.checkNetworksParameter(q.Get("networks")) if v.hasErrors() { handleErr(w, r, v) return } - dataAccessor := h.dai - if 
isMockEnabled(r) { - dataAccessor = h.dummy - } - data, paging, err := dataAccessor.GetDashboardNotifications(r.Context(), userId, chainIds, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetDashboardNotifications(r.Context(), userId, chainIds, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1976,11 +1924,7 @@ func (h *HandlerService) PublicGetUserNotificationsValidatorDashboard(w http.Res handleErr(w, r, v) return } - dataAccessor := h.dai - if isMockEnabled(r) { - dataAccessor = h.dummy - } - data, err := dataAccessor.GetValidatorDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) + data, err := h.getDataAccessor(r).GetValidatorDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) if err != nil { handleErr(w, r, err) return @@ -2015,7 +1959,7 @@ func (h *HandlerService) PublicGetUserNotificationsAccountDashboard(w http.Respo handleErr(w, r, v) return } - data, err := h.dai.GetAccountDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) + data, err := h.getDataAccessor(r).GetAccountDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch, search) if err != nil { handleErr(w, r, err) return @@ -2053,7 +1997,7 @@ func (h *HandlerService) PublicGetUserNotificationMachines(w http.ResponseWriter handleErr(w, r, v) return } - data, paging, err := h.dai.GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2092,7 +2036,7 @@ func (h *HandlerService) PublicGetUserNotificationClients(w http.ResponseWriter, handleErr(w, r, v) return } - data, paging, err := 
h.dai.GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2131,7 +2075,7 @@ func (h *HandlerService) PublicGetUserNotificationRocketPool(w http.ResponseWrit handleErr(w, r, v) return } - data, paging, err := h.dai.GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2169,7 +2113,7 @@ func (h *HandlerService) PublicGetUserNotificationNetworks(w http.ResponseWriter handleErr(w, r, v) return } - data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -2197,19 +2141,19 @@ func (h *HandlerService) PublicGetUserNotificationSettings(w http.ResponseWriter handleErr(w, r, err) return } - data, err := h.dai.GetNotificationSettings(r.Context(), userId) + data, err := h.getDataAccessor(r).GetNotificationSettings(r.Context(), userId) if err != nil { handleErr(w, r, err) return } // check premium perks - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - defaultSettings, err := h.dai.GetNotificationSettingsDefaultValues(r.Context()) + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) if err != nil { handleErr(w, r, err) return @@ -2272,12 
+2216,12 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons } // check premium perks - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - defaultSettings, err := h.dai.GetNotificationSettingsDefaultValues(r.Context()) + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) if err != nil { handleErr(w, r, err) return @@ -2293,7 +2237,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons return } - err = h.dai.UpdateNotificationSettingsGeneral(r.Context(), userId, req) + err = h.getDataAccessor(r).UpdateNotificationSettingsGeneral(r.Context(), userId, req) if err != nil { handleErr(w, r, err) return @@ -2358,7 +2302,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon IsNewRewardRoundSubscribed: req.IsNewRewardRoundSubscribed, } - err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, settings) + err = h.getDataAccessor(r).UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, settings) if err != nil { handleErr(w, r, err) return @@ -2401,13 +2345,13 @@ func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.R return } // TODO use a better way to validate the paired device id - pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") + pairedDeviceId := v.checkUint(mux.Vars(r)["paired_device_id"], "paired_device_id") name := v.checkNameNotEmpty(req.Name) if v.hasErrors() { handleErr(w, r, v) return } - err = h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId, name, req.IsNotificationsEnabled) + err = h.getDataAccessor(r).UpdateNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId, name, req.IsNotificationsEnabled) if err != nil { handleErr(w, r, err) return @@ -2442,12 
+2386,12 @@ func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w htt return } // TODO use a better way to validate the paired device id - pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") + pairedDeviceId := v.checkUint(mux.Vars(r)["paired_device_id"], "paired_device_id") if v.hasErrors() { handleErr(w, r, v) return } - err = h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId) + err = h.getDataAccessor(r).DeleteNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId) if err != nil { handleErr(w, r, err) return @@ -2487,7 +2431,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsClient(w http.Response handleErr(w, r, v) return } - data, err := h.dai.UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) + data, err := h.getDataAccessor(r).UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) if err != nil { handleErr(w, r, err) return @@ -2525,19 +2469,19 @@ func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.Resp handleErr(w, r, v) return } - data, paging, err := h.dai.GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + data, paging, err := h.getDataAccessor(r).GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) if err != nil { handleErr(w, r, err) return } // if users premium perks do not allow subscriptions, set them to false in the response // TODO: once stripe payments run in v2, this should be removed and the notification settings should be updated upon a tier change instead - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - defaultSettings, err := 
h.dai.GetNotificationSettingsDefaultValues(r.Context()) + defaultSettings, err := h.getDataAccessor(r).GetNotificationSettingsDefaultValues(r.Context()) if err != nil { handleErr(w, r, err) return @@ -2551,12 +2495,9 @@ func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.Resp handleErr(w, r, errors.New("invalid settings type")) return } - if !userInfo.PremiumPerks.NotificationsValidatorDashboardGroupOffline && settings.IsGroupOfflineSubscribed { - settings.IsGroupOfflineSubscribed = false - settings.GroupOfflineThreshold = defaultSettings.GroupOfflineThreshold - } - if !userInfo.PremiumPerks.NotificationsValidatorDashboardRealTimeMode && settings.IsRealTimeModeEnabled { - settings.IsRealTimeModeEnabled = false + if !userInfo.PremiumPerks.NotificationsValidatorDashboardGroupEfficiency && settings.IsGroupEfficiencyBelowSubscribed { + settings.IsGroupEfficiencyBelowSubscribed = false + settings.GroupEfficiencyBelowThreshold = defaultSettings.GroupEfficiencyBelowThreshold } data[i].Settings = settings } @@ -2593,7 +2534,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h handleErr(w, r, err) return } - checkMinMax(&v, req.GroupOfflineThreshold, 0, 1, "group_offline_threshold") + checkMinMax(&v, req.GroupEfficiencyBelowThreshold, 0, 1, "group_offline_threshold") vars := mux.Vars(r) dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) groupId := v.checkExistingGroupId(vars["group_id"]) @@ -2604,21 +2545,17 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h handleErr(w, r, v) return } - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + userInfo, err := h.getDataAccessor(r).GetUserInfo(r.Context(), userId) if err != nil { handleErr(w, r, err) return } - if !userInfo.PremiumPerks.NotificationsValidatorDashboardGroupOffline && req.IsGroupOfflineSubscribed { - returnForbidden(w, r, errors.New("user does not have premium perks to subscribe group offline")) - return - } 
- if !userInfo.PremiumPerks.NotificationsValidatorDashboardRealTimeMode && req.IsRealTimeModeEnabled { - returnForbidden(w, r, errors.New("user does not have premium perks to subscribe real time mode")) + if !userInfo.PremiumPerks.NotificationsValidatorDashboardGroupEfficiency && req.IsGroupEfficiencyBelowSubscribed { + returnForbidden(w, r, errors.New("user does not have premium perks to subscribe group efficiency event")) return } - err = h.dai.UpdateNotificationSettingsValidatorDashboard(r.Context(), userId, dashboardId, groupId, req) + err = h.getDataAccessor(r).UpdateNotificationSettingsValidatorDashboard(r.Context(), userId, dashboardId, groupId, req) if err != nil { handleErr(w, r, err) return @@ -2691,7 +2628,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsAccountDashboard(w htt IsERC721TokenTransfersSubscribed: req.IsERC721TokenTransfersSubscribed, IsERC1155TokenTransfersSubscribed: req.IsERC1155TokenTransfersSubscribed, } - err = h.dai.UpdateNotificationSettingsAccountDashboard(r.Context(), userId, dashboardId, groupId, settings) + err = h.getDataAccessor(r).UpdateNotificationSettingsAccountDashboard(r.Context(), userId, dashboardId, groupId, settings) if err != nil { handleErr(w, r, err) return @@ -2711,7 +2648,16 @@ func (h *HandlerService) PublicPutUserNotificationSettingsAccountDashboard(w htt // @Success 204 // @Router /users/me/notifications/test-email [post] func (h *HandlerService) PublicPostUserNotificationsTestEmail(w http.ResponseWriter, r *http.Request) { - // TODO + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + err = h.getDataAccessor(r).QueueTestEmailNotification(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } returnNoContent(w, r) } @@ -2724,7 +2670,16 @@ func (h *HandlerService) PublicPostUserNotificationsTestEmail(w http.ResponseWri // @Success 204 // @Router /users/me/notifications/test-push [post] func (h *HandlerService) 
PublicPostUserNotificationsTestPush(w http.ResponseWriter, r *http.Request) { - // TODO + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + err = h.getDataAccessor(r).QueueTestPushNotification(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } returnNoContent(w, r) } @@ -2741,6 +2696,11 @@ func (h *HandlerService) PublicPostUserNotificationsTestPush(w http.ResponseWrit // @Router /users/me/notifications/test-webhook [post] func (h *HandlerService) PublicPostUserNotificationsTestWebhook(w http.ResponseWriter, r *http.Request) { var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } type request struct { WebhookUrl string `json:"webhook_url"` IsDiscordWebhookEnabled bool `json:"is_discord_webhook_enabled,omitempty"` @@ -2754,7 +2714,11 @@ func (h *HandlerService) PublicPostUserNotificationsTestWebhook(w http.ResponseW handleErr(w, r, v) return } - // TODO + err = h.getDataAccessor(r).QueueTestWebhookNotification(r.Context(), userId, req.WebhookUrl, req.IsDiscordWebhookEnabled) + if err != nil { + handleErr(w, r, err) + return + } returnNoContent(w, r) } diff --git a/backend/pkg/api/handlers/search_handlers.go b/backend/pkg/api/handlers/search_handlers.go index 3b8f058a1..49d43acdd 100644 --- a/backend/pkg/api/handlers/search_handlers.go +++ b/backend/pkg/api/handlers/search_handlers.go @@ -120,208 +120,163 @@ func (h *HandlerService) InternalPostSearch(w http.ResponseWriter, r *http.Reque // Search Helper Functions func (h *HandlerService) handleSearch(ctx context.Context, input string, searchType searchTypeKey, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil + switch searchType { + case validatorByIndex: + return h.handleSearchValidatorByIndex(ctx, input, chainId) + case validatorByPublicKey: + return h.handleSearchValidatorByPublicKey(ctx, input, chainId) + case validatorsByDepositAddress: + 
return h.handleSearchValidatorsByDepositAddress(ctx, input, chainId) + case validatorsByDepositEnsName: + return h.handleSearchValidatorsByDepositEnsName(ctx, input, chainId) + case validatorsByWithdrawalCredential: + return h.handleSearchValidatorsByWithdrawalCredential(ctx, input, chainId) + case validatorsByWithdrawalAddress: + return h.handleSearchValidatorsByWithdrawalAddress(ctx, input, chainId) + case validatorsByWithdrawalEns: + return h.handleSearchValidatorsByWithdrawalEnsName(ctx, input, chainId) + case validatorsByGraffiti: + return h.handleSearchValidatorsByGraffiti(ctx, input, chainId) default: - switch searchType { - case validatorByIndex: - return h.handleSearchValidatorByIndex(ctx, input, chainId) - case validatorByPublicKey: - return h.handleSearchValidatorByPublicKey(ctx, input, chainId) - case validatorsByDepositAddress: - return h.handleSearchValidatorsByDepositAddress(ctx, input, chainId) - case validatorsByDepositEnsName: - return h.handleSearchValidatorsByDepositEnsName(ctx, input, chainId) - case validatorsByWithdrawalCredential: - return h.handleSearchValidatorsByWithdrawalCredential(ctx, input, chainId) - case validatorsByWithdrawalAddress: - return h.handleSearchValidatorsByWithdrawalAddress(ctx, input, chainId) - case validatorsByWithdrawalEns: - return h.handleSearchValidatorsByWithdrawalEnsName(ctx, input, chainId) - case validatorsByGraffiti: - return h.handleSearchValidatorsByGraffiti(ctx, input, chainId) - default: - return nil, errors.New("invalid search type") - } + return nil, errors.New("invalid search type") } } func (h *HandlerService) handleSearchValidatorByIndex(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - index, err := strconv.ParseUint(input, 10, 64) - if err != nil { - // input should've been checked by the regex before, this should never happen - return nil, err - } - result, err := h.dai.GetSearchValidatorByIndex(ctx, 
chainId, index) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorByIndex), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.PublicKey), - NumValue: &result.Index, - }, nil + index, err := strconv.ParseUint(input, 10, 64) + if err != nil { + // input should've been checked by the regex before, this should never happen + return nil, err + } + result, err := h.daService.GetSearchValidatorByIndex(ctx, chainId, index) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorByIndex), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.PublicKey), + NumValue: &result.Index, + }, nil } func (h *HandlerService) handleSearchValidatorByPublicKey(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - publicKey, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - // input should've been checked by the regex before, this should never happen - return nil, err - } - result, err := h.dai.GetSearchValidatorByPublicKey(ctx, chainId, publicKey) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorByPublicKey), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.PublicKey), - NumValue: &result.Index, - }, nil + publicKey, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + // input should've been checked by the regex before, this should never happen + return nil, err } + result, err := h.daService.GetSearchValidatorByPublicKey(ctx, chainId, publicKey) + if err != nil { + return nil, err + } + + return &types.SearchResult{ + Type: string(validatorByPublicKey), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.PublicKey), + NumValue: &result.Index, + }, nil } func (h *HandlerService) handleSearchValidatorsByDepositAddress(ctx context.Context, input string, chainId 
uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - address, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByDepositAddress(ctx, chainId, address) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByDepositAddress), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + address, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + return nil, err + } + result, err := h.daService.GetSearchValidatorsByDepositAddress(ctx, chainId, address) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByDepositAddress), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByDepositEnsName(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByDepositEnsName(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByDepositEnsName), - ChainId: chainId, - StrValue: result.EnsName, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByDepositEnsName(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByDepositEnsName), + ChainId: chainId, + StrValue: result.EnsName, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalCredential(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case 
<-ctx.Done(): - return nil, nil - default: - withdrawalCredential, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalCredential), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), - NumValue: &result.Count, - }, nil + withdrawalCredential, err := hex.DecodeString(strings.TrimPrefix(input, "0x")) + if err != nil { + return nil, err } + result, err := h.daService.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) + if err != nil { + return nil, err + } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalCredential), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalAddress(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - withdrawalString := "010000000000000000000000" + strings.TrimPrefix(input, "0x") - withdrawalCredential, err := hex.DecodeString(withdrawalString) - if err != nil { - return nil, err - } - result, err := h.dai.GetSearchValidatorsByWithdrawalCredential(ctx, chainId, withdrawalCredential) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalAddress), - ChainId: chainId, - HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), - NumValue: &result.Count, - }, nil + withdrawalString := "010000000000000000000000" + strings.TrimPrefix(input, "0x") + withdrawalCredential, err := hex.DecodeString(withdrawalString) + if err != nil { + return nil, err + } + result, err := h.daService.GetSearchValidatorsByWithdrawalCredential(ctx, 
chainId, withdrawalCredential) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalAddress), + ChainId: chainId, + HashValue: "0x" + hex.EncodeToString(result.WithdrawalCredential), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByWithdrawalEnsName(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByWithdrawalEnsName(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByWithdrawalEns), - ChainId: chainId, - StrValue: result.EnsName, - HashValue: "0x" + hex.EncodeToString(result.Address), - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByWithdrawalEnsName(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByWithdrawalEns), + ChainId: chainId, + StrValue: result.EnsName, + HashValue: "0x" + hex.EncodeToString(result.Address), + NumValue: &result.Count, + }, nil } func (h *HandlerService) handleSearchValidatorsByGraffiti(ctx context.Context, input string, chainId uint64) (*types.SearchResult, error) { - select { - case <-ctx.Done(): - return nil, nil - default: - result, err := h.dai.GetSearchValidatorsByGraffiti(ctx, chainId, input) - if err != nil { - return nil, err - } - - return &types.SearchResult{ - Type: string(validatorsByGraffiti), - ChainId: chainId, - StrValue: result.Graffiti, - NumValue: &result.Count, - }, nil + result, err := h.daService.GetSearchValidatorsByGraffiti(ctx, chainId, input) + if err != nil { + return nil, err } + + return &types.SearchResult{ + Type: string(validatorsByGraffiti), + ChainId: chainId, + StrValue: result.Graffiti, + NumValue: &result.Count, + }, nil } // -------------------------------------- diff --git a/backend/pkg/api/router.go 
b/backend/pkg/api/router.go index c58f7275c..7e4f3699d 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -25,9 +25,10 @@ func NewApiRouter(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAcc router := mux.NewRouter() apiRouter := router.PathPrefix("/api").Subrouter() publicRouter := apiRouter.PathPrefix("/v2").Subrouter() + legacyRouter := apiRouter.PathPrefix("/v1").Subrouter() internalRouter := apiRouter.PathPrefix("/i").Subrouter() sessionManager := newSessionManager(cfg) - internalRouter.Use(sessionManager.LoadAndSave) + internalRouter.Use(sessionManager.LoadAndSave, getSlidingSessionExpirationMiddleware(sessionManager)) if !(cfg.Frontend.CsrfInsecure || cfg.Frontend.Debug) { internalRouter.Use(getCsrfProtectionMiddleware(cfg), csrfInjecterMiddleware) @@ -38,7 +39,13 @@ func NewApiRouter(dataAccessor dataaccess.DataAccessor, dummy dataaccess.DataAcc publicRouter.Use(handlerService.StoreUserIdByApiKeyMiddleware) internalRouter.Use(handlerService.StoreUserIdBySessionMiddleware) + if cfg.DeploymentType != "production" { + publicRouter.Use(handlerService.StoreIsMockedFlagMiddleware) + internalRouter.Use(handlerService.StoreIsMockedFlagMiddleware) + } + addRoutes(handlerService, publicRouter, internalRouter, cfg) + addLegacyRoutes(handlerService, legacyRouter) // serve static files publicRouter.PathPrefix("/docs/").Handler(http.StripPrefix("/api/v2/docs/", http.FileServer(http.FS(docs.Files)))) @@ -243,6 +250,11 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro addEndpointsToRouters(endpoints, publicRouter, internalRouter) } +// Legacy routes are available behind the /v1 prefix and guarantee backwards compatibility with the old API +func addLegacyRoutes(hs *handlers.HandlerService, publicRouter *mux.Router) { + publicRouter.HandleFunc("/client/metrics", hs.LegacyPostUserMachineMetrics).Methods(http.MethodPost, http.MethodOptions) +} + func addValidatorDashboardRoutes(hs *handlers.HandlerService, 
publicRouter, internalRouter *mux.Router, cfg *types.Config) { vdbPath := "/validator-dashboards" publicRouter.HandleFunc(vdbPath, hs.PublicPostValidatorDashboards).Methods(http.MethodPost, http.MethodOptions) @@ -304,9 +316,9 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte {http.MethodGet, "/{dashboard_id}/total-withdrawals", hs.PublicGetValidatorDashboardTotalWithdrawals, hs.InternalGetValidatorDashboardTotalWithdrawals}, {http.MethodGet, "/{dashboard_id}/rocket-pool", hs.PublicGetValidatorDashboardRocketPool, hs.InternalGetValidatorDashboardRocketPool}, {http.MethodGet, "/{dashboard_id}/total-rocket-pool", hs.PublicGetValidatorDashboardTotalRocketPool, hs.InternalGetValidatorDashboardTotalRocketPool}, - {http.MethodGet, "/{dashboard_id}/rocket-pool/{node_address}", hs.PublicGetValidatorDashboardNodeRocketPool, hs.InternalGetValidatorDashboardNodeRocketPool}, {http.MethodGet, "/{dashboard_id}/rocket-pool/{node_address}/minipools", hs.PublicGetValidatorDashboardRocketPoolMinipools, hs.InternalGetValidatorDashboardRocketPoolMinipools}, - {http.MethodGet, "/{dashboard_id}/mobile-widget", nil, hs.InternalGetValidatorDashboardMobileWidget}, + {http.MethodGet, "/{dashboard_id}/mobile/widget", nil, hs.InternalGetValidatorDashboardMobileWidget}, + {http.MethodGet, "/{dashboard_id}/mobile/validators", nil, hs.InternalGetValidatorDashboardMobileValidators}, } addEndpointsToRouters(endpoints, publicDashboardRouter, internalDashboardRouter) } diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index a57ebf982..24e2b9522 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -142,8 +142,8 @@ type ChartHistorySeconds struct { } type IndexEpoch struct { - Index uint64 - Epoch uint64 + Index uint64 `json:"index"` + Epoch uint64 `json:"epoch"` } type IndexBlocks struct { @@ -151,6 +151,11 @@ type IndexBlocks struct { Blocks []uint64 `json:"blocks"` } +type IndexSlots struct { + Index 
uint64 `json:"index"` + Slots []uint64 `json:"slots"` +} + type ValidatorStateCounts struct { Online uint64 `json:"online"` Offline uint64 `json:"offline"` diff --git a/backend/pkg/api/types/data_access.go b/backend/pkg/api/types/data_access.go index db35871fb..6a2859f6f 100644 --- a/backend/pkg/api/types/data_access.go +++ b/backend/pkg/api/types/data_access.go @@ -16,6 +16,7 @@ const DefaultGroupId = 0 const AllGroups = -1 const NetworkAverage = -2 const DefaultGroupName = "default" +const DefaultDashboardName = DefaultGroupName type Sort[T enums.Enum] struct { Column T @@ -185,8 +186,9 @@ type NotificationsDashboardsCursor struct { } type NetworkInfo struct { - ChainId uint64 - Name string + ChainId uint64 + Name string + NotificationsName string } type ClientInfo struct { @@ -307,7 +309,7 @@ type MobileAppBundleStats struct { // Notification structs type NotificationSettingsDefaultValues struct { - GroupOfflineThreshold float64 + GroupEfficiencyBelowThreshold float64 MaxCollateralThreshold float64 MinCollateralThreshold float64 ERC20TokenTransfersValueThreshold float64 diff --git a/backend/pkg/api/types/machine_metrics.go b/backend/pkg/api/types/machine_metrics.go index 059c06471..dd0b05fab 100644 --- a/backend/pkg/api/types/machine_metrics.go +++ b/backend/pkg/api/types/machine_metrics.go @@ -1,79 +1,11 @@ package types -type MachineMetricSystem struct { - Timestamp uint64 `json:"timestamp,omitempty" faker:"boundary_start=1725166800, boundary_end=1725177600"` - ExporterVersion string `json:"exporter_version,omitempty"` - // system - CpuCores uint64 `json:"cpu_cores,omitempty"` - CpuThreads uint64 `json:"cpu_threads,omitempty"` - CpuNodeSystemSecondsTotal uint64 `json:"cpu_node_system_seconds_total,omitempty"` - CpuNodeUserSecondsTotal uint64 `json:"cpu_node_user_seconds_total,omitempty"` - CpuNodeIowaitSecondsTotal uint64 `json:"cpu_node_iowait_seconds_total,omitempty"` - CpuNodeIdleSecondsTotal uint64 `json:"cpu_node_idle_seconds_total,omitempty"` - 
MemoryNodeBytesTotal uint64 `json:"memory_node_bytes_total,omitempty"` - MemoryNodeBytesFree uint64 `json:"memory_node_bytes_free,omitempty"` - MemoryNodeBytesCached uint64 `json:"memory_node_bytes_cached,omitempty"` - MemoryNodeBytesBuffers uint64 `json:"memory_node_bytes_buffers,omitempty"` - DiskNodeBytesTotal uint64 `json:"disk_node_bytes_total,omitempty"` - DiskNodeBytesFree uint64 `json:"disk_node_bytes_free,omitempty"` - DiskNodeIoSeconds uint64 `json:"disk_node_io_seconds,omitempty"` - DiskNodeReadsTotal uint64 `json:"disk_node_reads_total,omitempty"` - DiskNodeWritesTotal uint64 `json:"disk_node_writes_total,omitempty"` - NetworkNodeBytesTotalReceive uint64 `json:"network_node_bytes_total_receive,omitempty"` - NetworkNodeBytesTotalTransmit uint64 `json:"network_node_bytes_total_transmit,omitempty"` - MiscNodeBootTsSeconds uint64 `json:"misc_node_boot_ts_seconds,omitempty"` - MiscOs string `json:"misc_os,omitempty"` - // do not store in bigtable but include them in generated model - Machine *string `json:"machine,omitempty"` -} - -type MachineMetricValidator struct { - Timestamp uint64 `json:"timestamp,omitempty" faker:"boundary_start=1725166800, boundary_end=1725177600"` - ExporterVersion string `json:"exporter_version,omitempty"` - // process - CpuProcessSecondsTotal uint64 `json:"cpu_process_seconds_total,omitempty"` - MemoryProcessBytes uint64 `json:"memory_process_bytes,omitempty"` - ClientName string `json:"client_name,omitempty"` - ClientVersion string `json:"client_version,omitempty"` - ClientBuild uint64 `json:"client_build,omitempty"` - SyncEth2FallbackConfigured bool `json:"sync_eth2_fallback_configured,omitempty"` - SyncEth2FallbackConnected bool `json:"sync_eth2_fallback_connected,omitempty"` - // validator - ValidatorTotal uint64 `json:"validator_total,omitempty"` - ValidatorActive uint64 `json:"validator_active,omitempty"` - // do not store in bigtable but include them in generated model - Machine *string `json:"machine,omitempty"` -} - -type 
MachineMetricNode struct { - Timestamp uint64 `json:"timestamp,omitempty" faker:"boundary_start=1725166800, boundary_end=1725177600"` - ExporterVersion string `json:"exporter_version,omitempty"` - // process - CpuProcessSecondsTotal uint64 `json:"cpu_process_seconds_total,omitempty"` - MemoryProcessBytes uint64 `json:"memory_process_bytes,omitempty"` - ClientName string `json:"client_name,omitempty"` - ClientVersion string `json:"client_version,omitempty"` - ClientBuild uint64 `json:"client_build,omitempty"` - SyncEth2FallbackConfigured bool `json:"sync_eth2_fallback_configured,omitempty"` - SyncEth2FallbackConnected bool `json:"sync_eth2_fallback_connected,omitempty"` - // node - DiskBeaconchainBytesTotal uint64 `json:"disk_beaconchain_bytes_total,omitempty"` - NetworkLibp2PBytesTotalReceive uint64 `json:"network_libp2p_bytes_total_receive,omitempty"` - NetworkLibp2PBytesTotalTransmit uint64 `json:"network_libp2p_bytes_total_transmit,omitempty"` - NetworkPeersConnected uint64 `json:"network_peers_connected,omitempty"` - SyncEth1Connected bool `json:"sync_eth1_connected,omitempty"` - SyncEth2Synced bool `json:"sync_eth2_synced,omitempty"` - SyncBeaconHeadSlot uint64 `json:"sync_beacon_head_slot,omitempty"` - SyncEth1FallbackConfigured bool `json:"sync_eth1_fallback_configured,omitempty"` - SyncEth1FallbackConnected bool `json:"sync_eth1_fallback_connected,omitempty"` - // do not store in bigtable but include them in generated model - Machine *string `json:"machine,omitempty"` -} +import "github.com/gobitfly/beaconchain/pkg/commons/types" type MachineMetricsData struct { - SystemMetrics []*MachineMetricSystem `json:"system_metrics" faker:"slice_len=30"` - ValidatorMetrics []*MachineMetricValidator `json:"validator_metrics" faker:"slice_len=30"` - NodeMetrics []*MachineMetricNode `json:"node_metrics" faker:"slice_len=30"` + SystemMetrics []*types.MachineMetricSystem `json:"system_metrics" faker:"slice_len=30"` + ValidatorMetrics []*types.MachineMetricValidator 
`json:"validator_metrics" faker:"slice_len=30"` + NodeMetrics []*types.MachineMetricNode `json:"node_metrics" faker:"slice_len=30"` } type GetUserMachineMetricsRespone ApiDataResponse[MachineMetricsData] diff --git a/backend/pkg/api/types/mobile.go b/backend/pkg/api/types/mobile.go index cb662370b..41a5e21d5 100644 --- a/backend/pkg/api/types/mobile.go +++ b/backend/pkg/api/types/mobile.go @@ -21,3 +21,26 @@ type MobileWidgetData struct { } type InternalGetValidatorDashboardMobileWidgetResponse ApiDataResponse[MobileWidgetData] + +type MobileValidatorDashboardValidatorsRocketPool struct { + DepositAmount decimal.Decimal `json:"deposit_amount"` + Commission float64 `json:"commission"` // percentage, 0-1 + Status string `json:"status" tstype:"'Staking' | 'Dissolved' | 'Prelaunch' | 'Initialized' | 'Withdrawable'" faker:"oneof: Staking, Dissolved, Prelaunch, Initialized, Withdrawable"` + PenaltyCount uint64 `json:"penalty_count"` + IsInSmoothingPool bool `json:"is_in_smoothing_pool"` +} +type MobileValidatorDashboardValidatorsTableRow struct { + Index uint64 `json:"index"` + PublicKey PubKey `json:"public_key"` + GroupId uint64 `json:"group_id"` + Balance decimal.Decimal `json:"balance"` + Status string `json:"status" tstype:"'slashed' | 'exited' | 'deposited' | 'pending' | 'slashing_offline' | 'slashing_online' | 'exiting_offline' | 'exiting_online' | 'active_offline' | 'active_online'" faker:"oneof: slashed, exited, deposited, pending, slashing_offline, slashing_online, exiting_offline, exiting_online, active_offline, active_online"` + QueuePosition *uint64 `json:"queue_position,omitempty"` + WithdrawalCredential Hash `json:"withdrawal_credential"` + // additional mobile fields + IsInSyncCommittee bool `json:"is_in_sync_committee"` + Efficiency float64 `json:"efficiency"` + RocketPool *MobileValidatorDashboardValidatorsRocketPool `json:"rocket_pool,omitempty"` +} + +type InternalGetValidatorDashboardMobileValidatorsResponse 
ApiPagingResponse[MobileValidatorDashboardValidatorsTableRow] diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index f9eb491f9..777cad8b3 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -36,11 +36,11 @@ type NotificationDashboardsTableRow struct { ChainId uint64 `db:"chain_id" json:"chain_id"` Epoch uint64 `db:"epoch" json:"epoch"` DashboardId uint64 `db:"dashboard_id" json:"dashboard_id"` - DashboardName string `db:"dashboard_name" json:"-"` // not exported, internal use only + DashboardName string `db:"dashboard_name" json:"dashboard_name"` GroupId uint64 `db:"group_id" json:"group_id"` GroupName string `db:"group_name" json:"group_name"` EntityCount uint64 `db:"entity_count" json:"entity_count"` - EventTypes pq.StringArray `db:"event_types" json:"event_types" tstype:"('validator_online' | 'validator_offline' | 'group_online' | 'group_offline' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'max_collateral' | 'min_collateral' | 'sync' | 'withdrawal' | 'validator_got_slashed' | 'validator_has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 'transfer_erc721' | 'transfer_erc1155')[]" faker:"slice_len=2, oneof: validator_online, validator_offline, group_online, group_offline, attestation_missed, proposal_success, proposal_missed, proposal_upcoming, max_collateral, min_collateral, sync, withdrawal, validator_got_slashed, validator_has_slashed, incoming_tx, outgoing_tx, transfer_erc20, transfer_erc721, transfer_erc1155"` + EventTypes pq.StringArray `db:"event_types" json:"event_types" tstype:"('validator_online' | 'validator_offline' | 'group_efficiency_below' | 'attestation_missed' | 'proposal_success' | 'proposal_missed' | 'proposal_upcoming' | 'max_collateral' | 'min_collateral' | 'sync' | 'withdrawal' | 'validator_got_slashed' | 'validator_has_slashed' | 'incoming_tx' | 'outgoing_tx' | 'transfer_erc20' | 
'transfer_erc721' | 'transfer_erc1155')[]" faker:"slice_len=2, oneof: validator_online, validator_offline, group_efficiency_below, attestation_missed, proposal_success, proposal_missed, proposal_upcoming, max_collateral, min_collateral, sync, withdrawal, validator_got_slashed, validator_has_slashed, incoming_tx, outgoing_tx, transfer_erc20, transfer_erc721, transfer_erc1155"` } type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[NotificationDashboardsTableRow] @@ -48,35 +48,31 @@ type InternalGetUserNotificationDashboardsResponse ApiPagingResponse[Notificatio // ------------------------------------------------------------ // Validator Dashboard Notification Detail -type NotificationEventGroup struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` -} -type NotificationEventGroupBackOnline struct { - GroupName string `json:"group_name"` - DashboardID uint64 `json:"dashboard_id"` - EpochCount uint64 `json:"epoch_count"` -} - type NotificationEventValidatorBackOnline struct { Index uint64 `json:"index"` EpochCount uint64 `json:"epoch_count"` } +type NotificationEventWithdrawal struct { + Index uint64 `json:"index"` + Amount decimal.Decimal `json:"amount"` + Address Address `json:"address"` +} + type NotificationValidatorDashboardDetail struct { - ValidatorOffline []uint64 `json:"validator_offline"` // validator indices - GroupOffline []NotificationEventGroup `json:"group_offline"` // TODO not filled yet - ProposalMissed []IndexBlocks `json:"proposal_missed"` + DashboardName string `db:"dashboard_name" json:"dashboard_name"` + GroupName string `db:"group_name" json:"group_name"` + ValidatorOffline []uint64 `json:"validator_offline"` // validator indices + GroupEfficiencyBelow float64 `json:"group_efficiency_below,omitempty"` // fill with the `group_efficiency_below` threshold if event is present + ProposalMissed []IndexSlots `json:"proposal_missed"` ProposalDone []IndexBlocks `json:"proposal_done"` - UpcomingProposals 
[]IndexBlocks `json:"upcoming_proposals"` + UpcomingProposals []IndexSlots `json:"upcoming_proposals"` Slashed []uint64 `json:"slashed"` // validator indices SyncCommittee []uint64 `json:"sync_committee"` // validator indices AttestationMissed []IndexEpoch `json:"attestation_missed"` // index (epoch) - Withdrawal []IndexBlocks `json:"withdrawal"` + Withdrawal []NotificationEventWithdrawal `json:"withdrawal"` ValidatorOfflineReminder []uint64 `json:"validator_offline_reminder"` // validator indices; TODO not filled yet - GroupOfflineReminder []NotificationEventGroup `json:"group_offline_reminder"` // TODO not filled yet ValidatorBackOnline []NotificationEventValidatorBackOnline `json:"validator_back_online"` - GroupBackOnline []NotificationEventGroupBackOnline `json:"group_back_online"` // TODO not filled yet MinimumCollateralReached []Address `json:"min_collateral_reached"` // node addresses MaximumCollateralReached []Address `json:"max_collateral_reached"` // node addresses } @@ -162,7 +158,7 @@ type NotificationNetwork struct { type InternalPutUserNotificationSettingsNetworksResponse ApiDataResponse[NotificationNetwork] type NotificationPairedDevice struct { - Id string `json:"id"` + Id uint64 `json:"id"` PairedTimestamp int64 `json:"paired_timestamp"` Name string `json:"name,omitempty"` IsNotificationsEnabled bool `json:"is_notifications_enabled"` @@ -204,11 +200,10 @@ type InternalGetUserNotificationSettingsResponse ApiDataResponse[NotificationSet type NotificationSettingsValidatorDashboard struct { WebhookUrl string `json:"webhook_url" faker:"url"` IsWebhookDiscordEnabled bool `json:"is_webhook_discord_enabled"` - IsRealTimeModeEnabled bool `json:"is_real_time_mode_enabled"` IsValidatorOfflineSubscribed bool `json:"is_validator_offline_subscribed"` - IsGroupOfflineSubscribed bool `json:"is_group_offline_subscribed"` - GroupOfflineThreshold float64 `json:"group_offline_threshold" faker:"boundary_start=0, boundary_end=1"` + IsGroupEfficiencyBelowSubscribed bool 
`json:"is_group_efficiency_below_subscribed"` + GroupEfficiencyBelowThreshold float64 `json:"group_efficiency_below_threshold" faker:"boundary_start=0, boundary_end=1"` IsAttestationsMissedSubscribed bool `json:"is_attestations_missed_subscribed"` IsBlockProposalSubscribed bool `json:"is_block_proposal_subscribed"` IsUpcomingBlockProposalSubscribed bool `json:"is_upcoming_block_proposal_subscribed"` @@ -242,6 +237,7 @@ type InternalPutUserNotificationSettingsAccountDashboardResponse ApiDataResponse type NotificationSettingsDashboardsTableRow struct { IsAccountDashboard bool `json:"is_account_dashboard"` // if false it's a validator dashboard DashboardId uint64 `json:"dashboard_id"` + DashboardName string `json:"dashboard_name"` GroupId uint64 `json:"group_id"` GroupName string `json:"group_name"` // if it's a validator dashboard, Settings is NotificationSettingsAccountDashboard, otherwise NotificationSettingsValidatorDashboard diff --git a/backend/pkg/api/types/user.go b/backend/pkg/api/types/user.go index 47dd0b3fd..8e4a30dc7 100644 --- a/backend/pkg/api/types/user.go +++ b/backend/pkg/api/types/user.go @@ -1,6 +1,7 @@ package types const UserGroupAdmin = "ADMIN" +const UserGroupDev = "DEV" type UserInfo struct { Id uint64 `json:"id"` @@ -116,25 +117,24 @@ type ExtraDashboardValidatorsPremiumAddon struct { } type PremiumPerks struct { - AdFree bool `json:"ad_free"` // note that this is somhow redunant, since there is already ApiPerks.NoAds - ValidatorDashboards uint64 `json:"validator_dashboards"` - ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` - ValidatorGroupsPerDashboard uint64 `json:"validator_groups_per_dashboard"` - ShareCustomDashboards bool `json:"share_custom_dashboards"` - ManageDashboardViaApi bool `json:"manage_dashboard_via_api"` - BulkAdding bool `json:"bulk_adding"` - ChartHistorySeconds ChartHistorySeconds `json:"chart_history_seconds"` - EmailNotificationsPerDay uint64 `json:"email_notifications_per_day"` - 
ConfigureNotificationsViaApi bool `json:"configure_notifications_via_api"` - ValidatorGroupNotifications uint64 `json:"validator_group_notifications"` - WebhookEndpoints uint64 `json:"webhook_endpoints"` - MobileAppCustomThemes bool `json:"mobile_app_custom_themes"` - MobileAppWidget bool `json:"mobile_app_widget"` - MonitorMachines uint64 `json:"monitor_machines"` - MachineMonitoringHistorySeconds uint64 `json:"machine_monitoring_history_seconds"` - NotificationsMachineCustomThreshold bool `json:"notifications_machine_custom_threshold"` - NotificationsValidatorDashboardRealTimeMode bool `json:"notifications_validator_dashboard_real_time_mode"` - NotificationsValidatorDashboardGroupOffline bool `json:"notifications_validator_dashboard_group_offline"` + AdFree bool `json:"ad_free"` // note that this is somhow redunant, since there is already ApiPerks.NoAds + ValidatorDashboards uint64 `json:"validator_dashboards"` + ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` + ValidatorGroupsPerDashboard uint64 `json:"validator_groups_per_dashboard"` + ShareCustomDashboards bool `json:"share_custom_dashboards"` + ManageDashboardViaApi bool `json:"manage_dashboard_via_api"` + BulkAdding bool `json:"bulk_adding"` + ChartHistorySeconds ChartHistorySeconds `json:"chart_history_seconds"` + EmailNotificationsPerDay uint64 `json:"email_notifications_per_day"` + ConfigureNotificationsViaApi bool `json:"configure_notifications_via_api"` + ValidatorGroupNotifications uint64 `json:"validator_group_notifications"` + WebhookEndpoints uint64 `json:"webhook_endpoints"` + MobileAppCustomThemes bool `json:"mobile_app_custom_themes"` + MobileAppWidget bool `json:"mobile_app_widget"` + MonitorMachines uint64 `json:"monitor_machines"` + MachineMonitoringHistorySeconds uint64 `json:"machine_monitoring_history_seconds"` + NotificationsMachineCustomThreshold bool `json:"notifications_machine_custom_threshold"` + NotificationsValidatorDashboardGroupEfficiency bool 
`json:"notifications_validator_dashboard_group_efficiency"` } // TODO @patrick post-beta StripeCreateCheckoutSession and StripeCustomerPortal are currently served from v1 (loadbalanced), Once V1 is not affected by this anymore, consider wrapping this with ApiDataResponse diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go index f5df0b039..d8de98b71 100644 --- a/backend/pkg/api/types/validator_dashboard.go +++ b/backend/pkg/api/types/validator_dashboard.go @@ -308,12 +308,7 @@ type VDBRocketPoolTableRow struct { Claimed decimal.Decimal `json:"claimed"` Unclaimed decimal.Decimal `json:"unclaimed"` } `json:"smoothing_pool"` -} -type GetValidatorDashboardRocketPoolResponse ApiPagingResponse[VDBRocketPoolTableRow] - -type GetValidatorDashboardTotalRocketPoolResponse ApiDataResponse[VDBRocketPoolTableRow] -type VDBNodeRocketPoolData struct { Timezone string `json:"timezone"` RefundBalance decimal.Decimal `json:"refund_balance"` DepositCredit decimal.Decimal `json:"deposit_credit"` @@ -322,8 +317,9 @@ type VDBNodeRocketPoolData struct { Max decimal.Decimal `json:"max"` } `json:"rpl_stake"` } +type GetValidatorDashboardRocketPoolResponse ApiPagingResponse[VDBRocketPoolTableRow] -type GetValidatorDashboardNodeRocketPoolResponse ApiDataResponse[VDBNodeRocketPoolData] +type GetValidatorDashboardTotalRocketPoolResponse ApiDataResponse[VDBRocketPoolTableRow] type VDBRocketPoolMinipoolsTableRow struct { Node Address `json:"node"` diff --git a/backend/pkg/commons/db/migrations/postgres/20241022072552_head_notification_status_tracking_table.sql b/backend/pkg/commons/db/migrations/postgres/20241022072552_head_notification_status_tracking_table.sql new file mode 100644 index 000000000..637704a10 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20241022072552_head_notification_status_tracking_table.sql @@ -0,0 +1,16 @@ +-- +goose Up +-- +goose StatementBegin +SELECT 'creating epochs_notified_head table'; +CREATE TABLE 
IF NOT EXISTS epochs_notified_head ( + epoch INTEGER NOT NULL, + event_name VARCHAR(255) NOT NULL, + senton TIMESTAMP WITHOUT TIME ZONE NOT NULL, + PRIMARY KEY (epoch, event_name) +); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT 'dropping epochs_notified_head table'; +DROP TABLE IF EXISTS epochs_notified_head; +-- +goose StatementEnd diff --git a/backend/pkg/commons/db/user.go b/backend/pkg/commons/db/user.go new file mode 100644 index 000000000..8ad275f8b --- /dev/null +++ b/backend/pkg/commons/db/user.go @@ -0,0 +1,466 @@ +package db + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math" + "time" + + t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/jmoiron/sqlx" +) + +var ErrNotFound = errors.New("not found") + +const hour uint64 = 3600 +const day = 24 * hour +const week = 7 * day +const month = 30 * day +const maxJsInt uint64 = 9007199254740991 // 2^53-1 (max safe int in JS) + +var freeTierProduct t.PremiumProduct = t.PremiumProduct{ + ProductName: "Free", + PremiumPerks: t.PremiumPerks{ + AdFree: false, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 20, + ValidatorGroupsPerDashboard: 1, + ShareCustomDashboards: false, + ManageDashboardViaApi: false, + BulkAdding: false, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 0, + Hourly: 12 * hour, + Daily: 0, + Weekly: 0, + }, + EmailNotificationsPerDay: 10, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 1, + WebhookEndpoints: 1, + MobileAppCustomThemes: false, + MobileAppWidget: false, + MonitorMachines: 1, + MachineMonitoringHistorySeconds: 3600 * 3, + NotificationsMachineCustomThreshold: false, + NotificationsValidatorDashboardGroupEfficiency: false, + }, + PricePerMonthEur: 0, + PricePerYearEur: 0, + ProductIdMonthly: "premium_free", + ProductIdYearly: "premium_free.yearly", +} + +var adminPerks = t.PremiumPerks{ + AdFree: false, // admins want to see ads to check ad 
configuration + ValidatorDashboards: maxJsInt, + ValidatorsPerDashboard: maxJsInt, + ValidatorGroupsPerDashboard: maxJsInt, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: maxJsInt, + Hourly: maxJsInt, + Daily: maxJsInt, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: maxJsInt, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: maxJsInt, + WebhookEndpoints: maxJsInt, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: maxJsInt, + MachineMonitoringHistorySeconds: maxJsInt, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, +} + +func GetUserInfo(ctx context.Context, userId uint64, userDbReader *sqlx.DB) (*t.UserInfo, error) { + // TODO @patrick post-beta improve and unmock + userInfo := &t.UserInfo{ + Id: userId, + ApiKeys: []string{}, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10, + ApiKeys: 4, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + Subscriptions: []t.UserSubscription{}, + } + + productSummary, err := GetProductSummary(ctx) + if err != nil { + return nil, fmt.Errorf("error getting productSummary: %w", err) + } + + result := struct { + Email string `db:"email"` + UserGroup string `db:"user_group"` + }{} + err = userDbReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("%w: user not found", ErrNotFound) + } + return nil, err + } + userInfo.Email = result.Email + userInfo.UserGroup = result.UserGroup + + userInfo.Email = utils.CensorEmail(userInfo.Email) + + err = userDbReader.SelectContext(ctx, &userInfo.ApiKeys, `SELECT api_key FROM api_keys WHERE user_id = $1`, userId) + if err != nil && err != sql.ErrNoRows { + return nil, 
fmt.Errorf("error getting userApiKeys for user %v: %w", userId, err) + } + + premiumProduct := struct { + ProductId string `db:"product_id"` + Store string `db:"store"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + }{} + err = userDbReader.GetContext(ctx, &premiumProduct, ` + SELECT + COALESCE(uas.product_id, '') AS product_id, + COALESCE(uas.store, '') AS store, + COALESCE(to_timestamp((uss.payload->>'current_period_start')::bigint),uas.created_at) AS start, + COALESCE(to_timestamp((uss.payload->>'current_period_end')::bigint),uas.expires_at) AS end + FROM users_app_subscriptions uas + LEFT JOIN users_stripe_subscriptions uss ON uss.subscription_id = uas.subscription_id + WHERE uas.user_id = $1 AND uas.active = true AND product_id IN ('orca.yearly', 'orca', 'dolphin.yearly', 'dolphin', 'guppy.yearly', 'guppy', 'whale', 'goldfish', 'plankton') + ORDER BY CASE uas.product_id + WHEN 'orca.yearly' THEN 1 + WHEN 'orca' THEN 2 + WHEN 'dolphin.yearly' THEN 3 + WHEN 'dolphin' THEN 4 + WHEN 'guppy.yearly' THEN 5 + WHEN 'guppy' THEN 6 + WHEN 'whale' THEN 7 + WHEN 'goldfish' THEN 8 + WHEN 'plankton' THEN 9 + ELSE 10 -- For any other product_id values + END, uas.id DESC + LIMIT 1`, userId) + if err != nil { + if err != sql.ErrNoRows { + return nil, fmt.Errorf("error getting premiumProduct for userId %v: %w", userId, err) + } + premiumProduct.ProductId = "premium_free" + premiumProduct.Store = "" + } + + foundProduct := false + for _, p := range productSummary.PremiumProducts { + effectiveProductId := premiumProduct.ProductId + productName := p.ProductName + switch premiumProduct.ProductId { + case "whale": + effectiveProductId = "dolphin" + productName = "Whale" + case "goldfish": + effectiveProductId = "guppy" + productName = "Goldfish" + case "plankton": + effectiveProductId = "guppy" + productName = "Plankton" + } + if p.ProductIdMonthly == effectiveProductId || p.ProductIdYearly == effectiveProductId { + userInfo.PremiumPerks = p.PremiumPerks + foundProduct 
= true + + store := t.ProductStoreStripe + switch premiumProduct.Store { + case "ios-appstore": + store = t.ProductStoreIosAppstore + case "android-playstore": + store = t.ProductStoreAndroidPlaystore + case "ethpool": + store = t.ProductStoreEthpool + case "manuall": + store = t.ProductStoreCustom + } + + if effectiveProductId != "premium_free" { + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: premiumProduct.ProductId, + ProductName: productName, + ProductCategory: t.ProductCategoryPremium, + ProductStore: store, + Start: premiumProduct.Start.Unix(), + End: premiumProduct.End.Unix(), + }) + } + break + } + } + if !foundProduct { + return nil, fmt.Errorf("product %s not found", premiumProduct.ProductId) + } + + premiumAddons := []struct { + PriceId string `db:"price_id"` + Start time.Time `db:"start"` + End time.Time `db:"end"` + Quantity int `db:"quantity"` + }{} + err = userDbReader.SelectContext(ctx, &premiumAddons, ` + SELECT + price_id, + to_timestamp((uss.payload->>'current_period_start')::bigint) AS start, + to_timestamp((uss.payload->>'current_period_end')::bigint) AS end, + COALESCE((uss.payload->>'quantity')::int,1) AS quantity + FROM users_stripe_subscriptions uss + INNER JOIN users u ON u.stripe_customer_id = uss.customer_id + WHERE u.id = $1 AND uss.active = true AND uss.purchase_group = 'addon'`, userId) + if err != nil { + return nil, fmt.Errorf("error getting premiumAddons for userId %v: %w", userId, err) + } + for _, addon := range premiumAddons { + foundAddon := false + for _, p := range productSummary.ExtraDashboardValidatorsPremiumAddon { + if p.StripePriceIdMonthly == addon.PriceId || p.StripePriceIdYearly == addon.PriceId { + foundAddon = true + for i := 0; i < addon.Quantity; i++ { + userInfo.PremiumPerks.ValidatorsPerDashboard += p.ExtraDashboardValidators + userInfo.Subscriptions = append(userInfo.Subscriptions, t.UserSubscription{ + ProductId: utils.PriceIdToProductId(addon.PriceId), + 
ProductName: p.ProductName, + ProductCategory: t.ProductCategoryPremiumAddon, + ProductStore: t.ProductStoreStripe, + Start: addon.Start.Unix(), + End: addon.End.Unix(), + }) + } + } + } + if !foundAddon { + return nil, fmt.Errorf("addon not found: %v", addon.PriceId) + } + } + + if productSummary.ValidatorsPerDashboardLimit < userInfo.PremiumPerks.ValidatorsPerDashboard { + userInfo.PremiumPerks.ValidatorsPerDashboard = productSummary.ValidatorsPerDashboardLimit + } + + if userInfo.UserGroup == t.UserGroupAdmin { + userInfo.PremiumPerks = adminPerks + } + + return userInfo, nil +} + +func GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { // TODO @patrick post-beta put into db instead of hardcoding here and make it configurable + return &t.ProductSummary{ + ValidatorsPerDashboardLimit: 102_000, + StripePublicKey: utils.Config.Frontend.Stripe.PublicKey, + ApiProducts: []t.ApiProduct{ // TODO @patrick post-beta this data is not final yet + { + ProductId: "api_free", + ProductName: "Free", + PricePerMonthEur: 0, + PricePerYearEur: 0 * 12, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 10, + UnitsPerMonth: 10_000_000, + ApiKeys: 2, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "iron", + ProductName: "Iron", + PricePerMonthEur: 1.99, + PricePerYearEur: math.Floor(1.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 20, + UnitsPerMonth: 20_000_000, + ApiKeys: 10, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "silver", + ProductName: "Silver", + PricePerMonthEur: 2.99, + PricePerYearEur: math.Floor(2.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 30, + UnitsPerMonth: 100_000_000, + ApiKeys: 20, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + { + ProductId: "gold", + 
ProductName: "Gold", + PricePerMonthEur: 3.99, + PricePerYearEur: math.Floor(3.99*12*0.9*100) / 100, + ApiPerks: t.ApiPerks{ + UnitsPerSecond: 40, + UnitsPerMonth: 200_000_000, + ApiKeys: 40, + ConsensusLayerAPI: true, + ExecutionLayerAPI: true, + Layer2API: true, + NoAds: true, + DiscordSupport: false, + }, + }, + }, + PremiumProducts: []t.PremiumProduct{ + freeTierProduct, + { + ProductName: "Guppy", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 1, + ValidatorsPerDashboard: 100, + ValidatorGroupsPerDashboard: 3, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: day, + Hourly: 7 * day, + Daily: month, + Weekly: 0, + }, + EmailNotificationsPerDay: 15, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 3, + WebhookEndpoints: 3, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 2, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, + }, + PricePerMonthEur: 9.99, + PricePerYearEur: 107.88, + ProductIdMonthly: "guppy", + ProductIdYearly: "guppy.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Guppy, + StripePriceIdYearly: utils.Config.Frontend.Stripe.GuppyYearly, + }, + { + ProductName: "Dolphin", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 300, + ValidatorGroupsPerDashboard: 10, + ShareCustomDashboards: true, + ManageDashboardViaApi: false, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 5 * day, + Hourly: month, + Daily: 2 * month, + Weekly: 8 * week, + }, + EmailNotificationsPerDay: 20, + ConfigureNotificationsViaApi: false, + ValidatorGroupNotifications: 10, + WebhookEndpoints: 10, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + 
NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, + }, + PricePerMonthEur: 29.99, + PricePerYearEur: 311.88, + ProductIdMonthly: "dolphin", + ProductIdYearly: "dolphin.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Dolphin, + StripePriceIdYearly: utils.Config.Frontend.Stripe.DolphinYearly, + }, + { + ProductName: "Orca", + PremiumPerks: t.PremiumPerks{ + AdFree: true, + ValidatorDashboards: 2, + ValidatorsPerDashboard: 1000, + ValidatorGroupsPerDashboard: 30, + ShareCustomDashboards: true, + ManageDashboardViaApi: true, + BulkAdding: true, + ChartHistorySeconds: t.ChartHistorySeconds{ + Epoch: 3 * week, + Hourly: 6 * month, + Daily: 12 * month, + Weekly: maxJsInt, + }, + EmailNotificationsPerDay: 50, + ConfigureNotificationsViaApi: true, + ValidatorGroupNotifications: 60, + WebhookEndpoints: 30, + MobileAppCustomThemes: true, + MobileAppWidget: true, + MonitorMachines: 10, + MachineMonitoringHistorySeconds: 3600 * 24 * 30, + NotificationsMachineCustomThreshold: true, + NotificationsValidatorDashboardGroupEfficiency: true, + }, + PricePerMonthEur: 49.99, + PricePerYearEur: 479.88, + ProductIdMonthly: "orca", + ProductIdYearly: "orca.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.Orca, + StripePriceIdYearly: utils.Config.Frontend.Stripe.OrcaYearly, + IsPopular: true, + }, + }, + ExtraDashboardValidatorsPremiumAddon: []t.ExtraDashboardValidatorsPremiumAddon{ + { + ProductName: "1k extra valis per dashboard", + ExtraDashboardValidators: 1000, + PricePerMonthEur: 74.99, + PricePerYearEur: 719.88, + ProductIdMonthly: "vdb_addon_1k", + ProductIdYearly: "vdb_addon_1k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon1k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon1kYearly, + }, + { + ProductName: "10k extra valis per dashboard", + ExtraDashboardValidators: 10000, + PricePerMonthEur: 449.99, + PricePerYearEur: 4319.88, + ProductIdMonthly: "vdb_addon_10k", + 
ProductIdYearly: "vdb_addon_10k.yearly", + StripePriceIdMonthly: utils.Config.Frontend.Stripe.VdbAddon10k, + StripePriceIdYearly: utils.Config.Frontend.Stripe.VdbAddon10kYearly, + }, + }, + }, nil +} + +func GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { + return &freeTierProduct.PremiumPerks, nil +} diff --git a/backend/pkg/commons/mail/mail.go b/backend/pkg/commons/mail/mail.go index e2e9d08e2..60f27a14c 100644 --- a/backend/pkg/commons/mail/mail.go +++ b/backend/pkg/commons/mail/mail.go @@ -73,35 +73,46 @@ func createTextMessage(msg types.Email) string { // SendMailRateLimited sends an email to a given address with the given message. // It will return a ratelimit-error if the configured ratelimit is exceeded. func SendMailRateLimited(content types.TransitEmailContent) error { - if utils.Config.Frontend.MaxMailsPerEmailPerDay > 0 { - now := time.Now() - count, err := db.CountSentMessage("n_mails", content.UserId) - if err != nil { - return err - } - timeLeft := now.Add(utils.Day).Truncate(utils.Day).Sub(now) - if count > int64(utils.Config.Frontend.MaxMailsPerEmailPerDay) { - return &types.RateLimitError{TimeLeft: timeLeft} - } else if count == int64(utils.Config.Frontend.MaxMailsPerEmailPerDay) { - // send an email if this was the last email for today - err := SendHTMLMail(content.Address, - "beaconcha.in - Email notification threshold limit reached", - types.Email{ - Title: "Email notification threshold limit reached", - //nolint: gosec - Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. 
Further notification emails will be suppressed for %.1f hours.", utils.Config.Frontend.MaxMailsPerEmailPerDay, timeLeft.Hours())), - }, - []types.EmailAttachment{}) - if err != nil { - return err - } - } + sendThresholdReachedMail := false + maxEmailsPerDay := int64(0) + userInfo, err := db.GetUserInfo(context.Background(), uint64(content.UserId), db.FrontendReaderDB) + if err != nil { + return err } - - err := SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) + maxEmailsPerDay = int64(userInfo.PremiumPerks.EmailNotificationsPerDay) + count, err := db.CountSentMessage("n_mails", content.UserId) if err != nil { return err } + timeLeft := time.Until(time.Now().Add(utils.Day).Truncate(utils.Day)) + + log.Debugf("user %d has sent %d of %d emails today, time left is %v", content.UserId, count, maxEmailsPerDay, timeLeft) + if count > maxEmailsPerDay { + return &types.RateLimitError{TimeLeft: timeLeft} + } else if count == maxEmailsPerDay { + sendThresholdReachedMail = true + } + + err = SendHTMLMail(content.Address, content.Subject, content.Email, content.Attachments) + if err != nil { + log.Error(err, "error sending email", 0) + } + + // make sure the threshold reached email arrives last + if sendThresholdReachedMail { + // send an email if this was the last email for today + err := SendHTMLMail(content.Address, + "beaconcha.in - Email notification threshold limit reached", + types.Email{ + Title: "Email notification threshold limit reached", + //nolint: gosec + Body: template.HTML(fmt.Sprintf("You have reached the email notification threshold limit of %d emails per day. 
Further notification emails will be suppressed for %.1f hours.", maxEmailsPerDay, timeLeft.Hours())), + }, + []types.EmailAttachment{}) + if err != nil { + return err + } + } return nil } diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 64b71cb09..60e0a7f48 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -68,9 +68,7 @@ const ( ValidatorMissedProposalEventName EventName = "validator_proposal_missed" ValidatorExecutedProposalEventName EventName = "validator_proposal_submitted" - ValidatorDidSlashEventName EventName = "validator_did_slash" - ValidatorGroupIsOfflineEventName EventName = "validator_group_is_offline" - ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" + ValidatorDidSlashEventName EventName = "validator_did_slash" ValidatorReceivedDepositEventName EventName = "validator_received_deposit" NetworkSlashingEventName EventName = "network_slashing" @@ -80,25 +78,22 @@ const ( NetworkValidatorExitQueueNotFullEventName EventName = "network_validator_exit_queue_not_full" NetworkLivenessIncreasedEventName EventName = "network_liveness_increased" TaxReportEventName EventName = "user_tax_report" - //nolint:misspell - RocketpoolCollateralMinReachedEventName EventName = "rocketpool_colleteral_min" - //nolint:misspell - RocketpoolCollateralMaxReachedEventName EventName = "rocketpool_colleteral_max" - SyncCommitteeSoonEventName EventName = "validator_synccommittee_soon" + SyncCommitteeSoonEventName EventName = "validator_synccommittee_soon" //nolint:misspell RocketpoolCommissionThresholdEventName EventName = "rocketpool_commision_threshold" // Validator dashboard events - ValidatorIsOfflineEventName EventName = "validator_is_offline" - GroupIsOfflineEventName EventName = "group_is_offline" - ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" - ValidatorProposalEventName EventName = "validator_proposal" - 
ValidatorUpcomingProposalEventName EventName = "validator_proposal_upcoming" - SyncCommitteeSoon EventName = "validator_synccommittee_soon" - ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" - ValidatorGotSlashedEventName EventName = "validator_got_slashed" - RocketpoolCollateralMinReached EventName = "rocketpool_colleteral_min" //nolint:misspell - RocketpoolCollateralMaxReached EventName = "rocketpool_colleteral_max" //nolint:misspell + ValidatorIsOfflineEventName EventName = "validator_is_offline" + ValidatorIsOnlineEventName EventName = "validator_is_online" + ValidatorGroupEfficiencyEventName EventName = "validator_group_efficiency" + ValidatorMissedAttestationEventName EventName = "validator_attestation_missed" + ValidatorProposalEventName EventName = "validator_proposal" + ValidatorUpcomingProposalEventName EventName = "validator_proposal_upcoming" + SyncCommitteeSoon EventName = "validator_synccommittee_soon" + ValidatorReceivedWithdrawalEventName EventName = "validator_withdrawal" + ValidatorGotSlashedEventName EventName = "validator_got_slashed" + RocketpoolCollateralMinReachedEventName EventName = "rocketpool_colleteral_min" //nolint:misspell + RocketpoolCollateralMaxReachedEventName EventName = "rocketpool_colleteral_max" //nolint:misspell // Account dashboard events IncomingTransactionEventName EventName = "incoming_transaction" @@ -124,6 +119,7 @@ const ( ) var EventSortOrder = []EventName{ + ValidatorUpcomingProposalEventName, ValidatorGotSlashedEventName, ValidatorDidSlashEventName, ValidatorMissedProposalEventName, @@ -134,6 +130,8 @@ var EventSortOrder = []EventName{ MonitoringMachineMemoryUsageEventName, SyncCommitteeSoonEventName, ValidatorIsOfflineEventName, + ValidatorIsOnlineEventName, + ValidatorGroupEfficiencyEventName, ValidatorReceivedWithdrawalEventName, NetworkLivenessIncreasedEventName, EthClientUpdateEventName, @@ -176,12 +174,15 @@ var MachineEventsMap = map[EventName]struct{}{ } var LegacyEventLabel 
map[EventName]string = map[EventName]string{ + ValidatorUpcomingProposalEventName: "Your validator(s) will soon propose a block", + ValidatorGroupEfficiencyEventName: "Your validator group efficiency is low", ValidatorMissedProposalEventName: "Your validator(s) missed a proposal", ValidatorExecutedProposalEventName: "Your validator(s) submitted a proposal", ValidatorMissedAttestationEventName: "Your validator(s) missed an attestation", ValidatorGotSlashedEventName: "Your validator(s) got slashed", ValidatorDidSlashEventName: "Your validator(s) slashed another validator", - ValidatorIsOfflineEventName: "Your validator(s) state changed", + ValidatorIsOfflineEventName: "Your validator(s) went offline", + ValidatorIsOnlineEventName: "Your validator(s) came back online", ValidatorReceivedWithdrawalEventName: "A withdrawal was initiated for your validators", NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", EthClientUpdateEventName: "An Ethereum client has a new update available", @@ -198,13 +199,16 @@ var LegacyEventLabel map[EventName]string = map[EventName]string{ } var EventLabel map[EventName]string = map[EventName]string{ + ValidatorUpcomingProposalEventName: "Upcoming block proposal", + ValidatorGroupEfficiencyEventName: "Low validator group efficiency", ValidatorMissedProposalEventName: "Block proposal missed", ValidatorExecutedProposalEventName: "Block proposal submitted", ValidatorMissedAttestationEventName: "Attestation missed", ValidatorGotSlashedEventName: "Validator slashed", ValidatorDidSlashEventName: "Validator has slashed", - ValidatorIsOfflineEventName: "Validator online / offline", - ValidatorReceivedWithdrawalEventName: "Validator withdrawal initiated", + ValidatorIsOfflineEventName: "Validator offline", + ValidatorIsOnlineEventName: "Validator back online", + ValidatorReceivedWithdrawalEventName: "Withdrawal processed", NetworkLivenessIncreasedEventName: "The network is experiencing liveness issues", 
EthClientUpdateEventName: "An Ethereum client has a new update available", MonitoringMachineOfflineEventName: "Machine offline", @@ -231,11 +235,13 @@ func IsMachineNotification(event EventName) bool { var EventNames = []EventName{ ValidatorExecutedProposalEventName, + ValidatorGroupEfficiencyEventName, ValidatorMissedProposalEventName, ValidatorMissedAttestationEventName, ValidatorGotSlashedEventName, ValidatorDidSlashEventName, ValidatorIsOfflineEventName, + ValidatorIsOnlineEventName, ValidatorReceivedWithdrawalEventName, NetworkLivenessIncreasedEventName, EthClientUpdateEventName, diff --git a/backend/pkg/commons/utils/efficiency.go b/backend/pkg/commons/utils/efficiency.go new file mode 100644 index 000000000..5bb7cd57c --- /dev/null +++ b/backend/pkg/commons/utils/efficiency.go @@ -0,0 +1,25 @@ +package utils + +import "database/sql" + +func CalculateTotalEfficiency(attestationEff, proposalEff, syncEff sql.NullFloat64) float64 { + efficiency := float64(0) + + if !attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { + efficiency = 0 + } else if attestationEff.Valid && !proposalEff.Valid && !syncEff.Valid { + efficiency = attestationEff.Float64 * 100.0 + } else if attestationEff.Valid && proposalEff.Valid && !syncEff.Valid { + efficiency = ((56.0 / 64.0 * attestationEff.Float64) + (8.0 / 64.0 * proposalEff.Float64)) * 100.0 + } else if attestationEff.Valid && !proposalEff.Valid && syncEff.Valid { + efficiency = ((62.0 / 64.0 * attestationEff.Float64) + (2.0 / 64.0 * syncEff.Float64)) * 100.0 + } else { + efficiency = (((54.0 / 64.0) * attestationEff.Float64) + ((8.0 / 64.0) * proposalEff.Float64) + ((2.0 / 64.0) * syncEff.Float64)) * 100.0 + } + + if efficiency < 0 { + efficiency = 0 + } + + return efficiency +} diff --git a/backend/pkg/commons/utils/utils.go b/backend/pkg/commons/utils/utils.go index c84aca8d3..4a9e1538a 100644 --- a/backend/pkg/commons/utils/utils.go +++ b/backend/pkg/commons/utils/utils.go @@ -396,3 +396,10 @@ func 
Deduplicate(slice []uint64) []uint64 { } return list } + +func FirstN(input string, n int) string { + if len(input) <= n { + return input + } + return input[:n] +} diff --git a/backend/pkg/exporter/modules/base.go b/backend/pkg/exporter/modules/base.go index 300f44229..1dba9ed01 100644 --- a/backend/pkg/exporter/modules/base.go +++ b/backend/pkg/exporter/modules/base.go @@ -32,8 +32,8 @@ type ModuleInterface interface { var Client *rpc.Client // Start will start the export of data from rpc into the database -func StartAll(context ModuleContext) { - if !utils.Config.JustV2 { +func StartAll(context ModuleContext, modules []ModuleInterface, justV2 bool) { + if !justV2 { go networkLivenessUpdater(context.ConsClient) go genesisDepositsExporter(context.ConsClient) go syncCommitteesExporter(context.ConsClient) @@ -65,19 +65,6 @@ func StartAll(context ModuleContext) { } // start subscription modules - - modules := []ModuleInterface{} - - if utils.Config.JustV2 { - modules = append(modules, NewDashboardDataModule(context)) - } else { - modules = append(modules, - NewSlotExporter(context), - NewExecutionDepositsExporter(context), - NewExecutionPayloadsExporter(context), - ) - } - startSubscriptionModules(&context, modules) } diff --git a/backend/pkg/exporter/modules/execution_deposits_exporter.go b/backend/pkg/exporter/modules/execution_deposits_exporter.go index 41fab0f25..ef7d2129f 100644 --- a/backend/pkg/exporter/modules/execution_deposits_exporter.go +++ b/backend/pkg/exporter/modules/execution_deposits_exporter.go @@ -8,7 +8,7 @@ import ( "encoding/hex" "fmt" "math/big" - "sync" + "sync/atomic" "time" "github.com/attestantio/go-eth2-client/spec/phase0" @@ -19,13 +19,16 @@ import ( gethtypes "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethclient" gethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/go-redis/redis/v8" "golang.org/x/exp/maps" + "golang.org/x/sync/errgroup" 
"github.com/gobitfly/beaconchain/pkg/commons/contracts/deposit_contract" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/rpc" + "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" @@ -38,37 +41,34 @@ import ( type executionDepositsExporter struct { ModuleContext - Client rpc.Client - ErigonClient *gethrpc.Client - GethClient *gethrpc.Client - LogClient *ethclient.Client - LogFilterer *deposit_contract.DepositContractFilterer - DepositContractAddress common.Address - LastExportedBlock uint64 - ExportMutex *sync.Mutex - StopEarlyMutex *sync.Mutex - StopEarly context.CancelFunc - Signer gethtypes.Signer - DepositMethod abi.Method + Client rpc.Client + ErigonClient *gethrpc.Client + GethClient *gethrpc.Client + LogClient *ethclient.Client + LogFilterer *deposit_contract.DepositContractFilterer + DepositContractAddress common.Address + LastExportedBlock uint64 + LastExportedFinalizedBlock uint64 + LastExportedFinalizedBlockRedisKey string + CurrentHeadBlock atomic.Uint64 + Signer gethtypes.Signer + DepositMethod abi.Method } func NewExecutionDepositsExporter(moduleContext ModuleContext) ModuleInterface { return &executionDepositsExporter{ - ModuleContext: moduleContext, - Client: moduleContext.ConsClient, - DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), - LastExportedBlock: 0, - ExportMutex: &sync.Mutex{}, - StopEarlyMutex: &sync.Mutex{}, + ModuleContext: moduleContext, + Client: moduleContext.ConsClient, + DepositContractAddress: common.HexToAddress(utils.Config.Chain.ClConfig.DepositContractAddress), + LastExportedBlock: 0, + LastExportedFinalizedBlock: 0, } } -func (d *executionDepositsExporter) 
OnHead(event *constypes.StandardEventHeadResponse) (err error) { - return nil // nop -} - func (d *executionDepositsExporter) Init() error { - d.Signer = gethtypes.NewCancunSigner(big.NewInt(int64(utils.Config.Chain.ClConfig.DepositChainID))) + d.Signer = gethtypes.NewCancunSigner(big.NewInt(0).SetUint64(utils.Config.Chain.ClConfig.DepositChainID)) + + d.LastExportedFinalizedBlockRedisKey = fmt.Sprintf("%d:execution_deposits_exporter:last_exported_finalized_block", utils.Config.Chain.ClConfig.DepositChainID) rpcClient, err := gethrpc.Dial(utils.Config.Eth1GethEndpoint) if err != nil { @@ -124,14 +124,29 @@ func (d *executionDepositsExporter) Init() error { d.LastExportedBlock = utils.Config.Indexer.ELDepositContractFirstBlock } - log.Infof("initialized execution deposits exporter with last exported block: %v", d.LastExportedBlock) + val, err := db.PersistentRedisDbClient.Get(context.Background(), d.LastExportedFinalizedBlockRedisKey).Uint64() + switch { + case err == redis.Nil: + log.Warnf("%v missing in redis, exporting from beginning", d.LastExportedFinalizedBlockRedisKey) + case err != nil: + log.Fatal(err, "error getting last exported finalized block from redis", 0) + } + + d.LastExportedFinalizedBlock = val + // avoid fetching old bocks on a chain without deposits + if d.LastExportedFinalizedBlock > d.LastExportedBlock { + d.LastExportedBlock = d.LastExportedFinalizedBlock + } + + log.Infof("initialized execution deposits exporter with last exported block/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock) - // quick kick-start go func() { - err := d.OnFinalizedCheckpoint(nil) + // quick kick-start + err = d.OnHead(nil) if err != nil { - log.Error(err, "error during kick-start", 0) + log.Error(err, "error kick-starting executionDepositsExporter", 0) } + d.exportLoop() }() return nil @@ -145,90 +160,119 @@ func (d *executionDepositsExporter) OnChainReorg(event *constypes.StandardEventC return nil // nop } -// can take however long it 
wants to run, is run in a separate goroutine, so no need to worry about blocking func (d *executionDepositsExporter) OnFinalizedCheckpoint(event *constypes.StandardFinalizedCheckpointResponse) (err error) { - // important: have to fetch the actual finalized epoch because even tho its called on finalized checkpoint it actually emits for each justified epoch - // so we have to do an extra request to get the actual latest finalized epoch - res, err := d.CL.GetFinalityCheckpoints("head") - if err != nil { - return err - } + return nil // nop +} - var nearestELBlock sql.NullInt64 - err = db.ReaderDb.Get(&nearestELBlock, "select exec_block_number from blocks where slot <= $1 and exec_block_number > 0 order by slot desc limit 1", res.Data.Finalized.Epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch) - if err != nil { - return err - } - if !nearestELBlock.Valid { - return fmt.Errorf("no block found for finalized epoch %v", res.Data.Finalized.Epoch) +func (d *executionDepositsExporter) OnHead(event *constypes.StandardEventHeadResponse) (err error) { + return nil // nop +} + +func (d *executionDepositsExporter) exportLoop() { + ticker := time.NewTicker(time.Second * 10) + defer ticker.Stop() + for ; true; <-ticker.C { + err := d.export() + if err != nil { + log.Error(err, "error during export", 0) + services.ReportStatus("execution_deposits_exporter", err.Error(), nil) + } else { + services.ReportStatus("execution_deposits_exporter", "Running", nil) + } } - log.Debugf("exporting execution layer deposits till block %v", nearestELBlock.Int64) +} - err = d.exportTillBlock(uint64(nearestELBlock.Int64)) +func (d *executionDepositsExporter) export() (err error) { + var headBlock, finBlock uint64 + var g errgroup.Group + g.Go(func() error { + headSlot, err := d.CL.GetSlot("head") + if err != nil { + return fmt.Errorf("error getting head-slot: %w", err) + } + headBlock = headSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + g.Go(func() error { + finSlot, err := 
d.CL.GetSlot("finalized") + if err != nil { + return fmt.Errorf("error getting finalized-slot: %w", err) + } + finBlock = finSlot.Data.Message.Body.ExecutionPayload.BlockNumber + return nil + }) + err = g.Wait() if err != nil { return err } - return nil -} - -// this is basically synchronous, each time it gets called it will kill the previous export and replace it with itself -func (d *executionDepositsExporter) exportTillBlock(block uint64) (err error) { - // following blocks if a previous function call is still waiting for an export to stop early - d.StopEarlyMutex.Lock() - if d.StopEarly != nil { - // this will run even if the previous export has already finished - // preventing this would require an overly complex solution - log.Debugf("asking potentially running export to stop early") - d.StopEarly() + if d.LastExportedBlock >= headBlock && d.LastExportedFinalizedBlock >= finBlock { + log.Debugf("skip exporting execution layer deposits: last exported block/finalizedBlock: %v/%v, headBlock/finalizedBlock: %v/%v", d.LastExportedBlock, d.LastExportedFinalizedBlock, headBlock, finBlock) + return nil } - // following blocks as long as the running export hasn't finished yet - d.ExportMutex.Lock() - ctx, cancel := context.WithCancel(context.Background()) - d.StopEarly = cancel - // we have over taken and allow potentially newer function calls to signal us to stop early - d.StopEarlyMutex.Unlock() + nextFinalizedBlock := finBlock blockOffset := d.LastExportedBlock + 1 - blockTarget := block - - defer d.ExportMutex.Unlock() - - log.Infof("exporting execution layer deposits from %v to %v", blockOffset, blockTarget) - - depositsToSave := make([]*types.ELDeposit, 0) - + // make sure to reexport every block since last exported finalized blocks to handle reorgs + if blockOffset > d.LastExportedFinalizedBlock { + blockOffset = d.LastExportedFinalizedBlock + 1 + } + blockTarget := headBlock + + log.InfoWithFields(log.Fields{ + "nextHeadBlock": headBlock, + "nextFinBlock": 
nextFinalizedBlock, + "lastHeadBlock": d.LastExportedBlock, + "lastFinBlock": d.LastExportedFinalizedBlock, + }, fmt.Sprintf("exporting execution layer deposits from %d to %d", blockOffset, blockTarget)) + + depositsToSaveBatchSize := 10_000 // limit how much deposits we save in one go + blockBatchSize := uint64(10_000) // limit how much blocks we fetch until updating the redis-key + depositsToSave := make([]*types.ELDeposit, 0, depositsToSaveBatchSize) for blockOffset < blockTarget { - tmpBlockTarget := blockOffset + 1000 - if tmpBlockTarget > blockTarget { - tmpBlockTarget = blockTarget + depositsToSave = depositsToSave[:0] + blockBatchStart := blockOffset + for blockOffset < blockTarget && len(depositsToSave) <= depositsToSaveBatchSize && blockOffset < blockBatchStart+blockBatchSize { + tmpBlockTarget := blockOffset + 1000 + if tmpBlockTarget > blockTarget { + tmpBlockTarget = blockTarget + } + log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) + tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + if err != nil { + return err + } + depositsToSave = append(depositsToSave, tmp...) + blockOffset = tmpBlockTarget } - log.Debugf("fetching deposits from %v to %v", blockOffset, tmpBlockTarget) - tmp, err := d.fetchDeposits(blockOffset, tmpBlockTarget) + + log.Debugf("saving %v deposits", len(depositsToSave)) + err = d.saveDeposits(depositsToSave) if err != nil { return err } - depositsToSave = append(depositsToSave, tmp...) 
- blockOffset = tmpBlockTarget - - select { - case <-ctx.Done(): // a newer function call has asked us to stop early - log.Warnf("stop early signal received, stopping export early") - blockTarget = tmpBlockTarget - default: - continue + d.LastExportedBlock = blockOffset + + prevLastExportedFinalizedBlock := d.LastExportedFinalizedBlock + if nextFinalizedBlock > d.LastExportedBlock && d.LastExportedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = d.LastExportedBlock + } else if nextFinalizedBlock > d.LastExportedFinalizedBlock { + d.LastExportedFinalizedBlock = nextFinalizedBlock } - } - log.Debugf("saving %v deposits", len(depositsToSave)) - err = d.saveDeposits(depositsToSave) - if err != nil { - return err + // update redis to keep track of last exported finalized block persistently + if prevLastExportedFinalizedBlock != d.LastExportedFinalizedBlock { + log.Infof("updating %v: %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + err := db.PersistentRedisDbClient.Set(ctx, d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock, 0).Err() + if err != nil { + log.Error(err, fmt.Sprintf("error setting redis %v = %v", d.LastExportedFinalizedBlockRedisKey, d.LastExportedFinalizedBlock), 0) + } + } } - d.LastExportedBlock = blockTarget - start := time.Now() // update cached view err = d.updateCachedView() @@ -271,8 +315,8 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de return nil, fmt.Errorf("nil deposit-log") } - depositLog := - depositLogIterator.Event + depositLog := depositLogIterator.Event + err = utils.VerifyDepositSignature(&phase0.DepositData{ PublicKey: phase0.BLSPubKey(depositLog.Pubkey), WithdrawalCredentials: depositLog.WithdrawalCredentials, @@ -387,6 +431,10 @@ func (d *executionDepositsExporter) fetchDeposits(fromBlock, toBlock uint64) (de } func (d *executionDepositsExporter) 
saveDeposits(depositsToSave []*types.ELDeposit) error { + if len(depositsToSave) == 0 { + return nil + } + tx, err := db.WriterDb.Beginx() if err != nil { return err diff --git a/backend/pkg/exporter/modules/relays.go b/backend/pkg/exporter/modules/relays.go index 205586792..78d068f93 100644 --- a/backend/pkg/exporter/modules/relays.go +++ b/backend/pkg/exporter/modules/relays.go @@ -107,21 +107,21 @@ func fetchDeliveredPayloads(r types.Relay, offset uint64) ([]BidTrace, error) { if offset != 0 { url += fmt.Sprintf("&cursor=%v", offset) } - - //nolint:gosec - resp, err := http.Get(url) + client := &http.Client{ + Timeout: time.Second * 30, + } + resp, err := client.Get(url) if err != nil { - log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID}) - return nil, err + log.Error(err, "error retrieving delivered payloads", 0, map[string]interface{}{"relay": r.ID, "offset": offset, "url": url}) + return nil, fmt.Errorf("error retrieving delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } defer resp.Body.Close() err = json.NewDecoder(resp.Body).Decode(&payloads) - if err != nil { - return nil, err + return nil, fmt.Errorf("error decoding json for delivered payloads for relay: %v, offset: %v, url: %v: %w", r.ID, offset, url, err) } return payloads, nil @@ -175,7 +175,7 @@ func retrieveAndInsertPayloadsFromRelay(r types.Relay, low_bound uint64, high_bo for { resp, err := fetchDeliveredPayloads(r, offset) if err != nil { - return err + return fmt.Errorf("error calling fetchDeliveredPayloads with offset: %v for relay: %v: %w", offset, r.ID, err) } if resp == nil { diff --git a/backend/pkg/notification/collection.go b/backend/pkg/notification/collection.go index 8c6918aac..ba6b0568f 100644 --- a/backend/pkg/notification/collection.go +++ b/backend/pkg/notification/collection.go @@ -21,8 +21,11 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/services" 
"github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" + constypes "github.com/gobitfly/beaconchain/pkg/consapi/types" + "github.com/gobitfly/beaconchain/pkg/exporter/modules" "github.com/lib/pq" "github.com/rocket-pool/rocketpool-go/utils/eth" + "github.com/shopspring/decimal" ) func InitNotificationCollector(pubkeyCachePath string) { @@ -45,8 +48,11 @@ func notificationCollector() { var once sync.Once once.Do(func() { gob.Register(&ValidatorProposalNotification{}) + gob.Register(&ValidatorUpcomingProposalNotification{}) + gob.Register(&ValidatorGroupEfficiencyNotification{}) gob.Register(&ValidatorAttestationNotification{}) gob.Register(&ValidatorIsOfflineNotification{}) + gob.Register(&ValidatorIsOnlineNotification{}) gob.Register(&ValidatorGotSlashedNotification{}) gob.Register(&ValidatorWithdrawalNotification{}) gob.Register(&NetworkNotification{}) @@ -57,6 +63,56 @@ func notificationCollector() { gob.Register(&SyncCommitteeSoonNotification{}) }) + mc, err := modules.GetModuleContext() + if err != nil { + log.Fatal(err, "error getting module context", 0) + } + + go func() { + log.Infof("starting head notification collector") + for ; ; time.Sleep(time.Second * 30) { + // get the head epoch + head, err := mc.ConsClient.GetChainHead() + if err != nil { + log.Error(err, "error getting chain head", 0) + continue + } + + headEpoch := head.HeadEpoch + + var lastNotifiedEpoch uint64 + err = db.WriterDb.Get(&lastNotifiedEpoch, "SELECT COUNT(*) FROM epochs_notified_head WHERE epoch = $1 AND event_name = $2", headEpoch, types.ValidatorUpcomingProposalEventName) + + if err != nil { + log.Error(err, fmt.Sprintf("error checking if upcoming block proposal notifications for epoch %v have already been collected", headEpoch), 0) + continue + } + + if lastNotifiedEpoch > 0 { + log.Warnf("head epoch notifications for epoch %v have already been collected", headEpoch) + continue + } + + notifications, err := 
collectHeadNotifications(mc, headEpoch) + if err != nil { + log.Error(err, "error collecting head notifications", 0) + } + + _, err = db.WriterDb.Exec("INSERT INTO epochs_notified_head (epoch, event_name, senton) VALUES ($1, $2, NOW())", headEpoch, types.ValidatorUpcomingProposalEventName) + if err != nil { + log.Error(err, "error marking head notification status for epoch in db", 0) + continue + } + + if len(notifications) > 0 { + err = queueNotifications(headEpoch, notifications) + if err != nil { + log.Error(err, "error queuing head notifications", 0) + } + } + } + }() + for { latestFinalizedEpoch := cache.LatestFinalizedEpoch.Get() @@ -105,7 +161,7 @@ func notificationCollector() { log.Infof("collecting notifications for epoch %v", epoch) // Network DB Notifications (network related) - notifications, err := collectNotifications(epoch) + notifications, err := collectNotifications(epoch, mc) if err != nil { log.Error(err, "error collection notifications", 0) @@ -157,7 +213,74 @@ func notificationCollector() { } } -func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { +func collectHeadNotifications(mc modules.ModuleContext, headEpoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} + start := time.Now() + err := collectUpcomingBlockProposalNotifications(notificationsByUserID, mc, headEpoch) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_upcoming_block_proposal").Inc() + return nil, fmt.Errorf("error collecting upcoming block proposal notifications: %v", err) + } + log.Infof("collecting upcoming block proposal notifications took: %v", time.Since(start)) + + return notificationsByUserID, nil +} + +func collectUpcomingBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, mc modules.ModuleContext, headEpoch uint64) (err error) { + nextEpoch := headEpoch + 1 + log.Infof("collecting upcoming block proposal notifications for epoch %v 
(head epoch is %d)", nextEpoch, headEpoch) + + if utils.EpochToTime(nextEpoch).Before(time.Now()) { + log.Error(fmt.Errorf("error upcoming block proposal notifications for epoch %v are already in the past", nextEpoch), "", 0) + return nil + } + + assignments, err := mc.CL.GetPropoalAssignments(nextEpoch) + if err != nil { + return fmt.Errorf("error getting proposal assignments: %w", err) + } + + subs, err := GetSubsForEventFilter(types.ValidatorUpcomingProposalEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for upcoming block proposal notifications: %w", err) + } + + log.Infof("retrieved %d subscriptions for upcoming block proposal notifications", len(subs)) + if len(subs) == 0 { + return nil + } + + for _, assignment := range assignments.Data { + log.Infof("upcoming block proposal for validator %d in slot %d", assignment.ValidatorIndex, assignment.Slot) + for _, sub := range subs[hex.EncodeToString(assignment.Pubkey)] { + if sub.UserID == nil || sub.ID == nil { + return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) + } + + log.Infof("creating %v notification for validator %v in epoch %v (dashboard: %v)", sub.EventName, assignment.ValidatorIndex, nextEpoch, sub.DashboardId != nil) + n := &ValidatorUpcomingProposalNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: nextEpoch, + EventName: sub.EventName, + EventFilter: hex.EncodeToString(assignment.Pubkey), + DashboardId: sub.DashboardId, + DashboardName: sub.DashboardName, + DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: sub.DashboardGroupName, + }, + ValidatorIndex: assignment.ValidatorIndex, + Slot: uint64(assignment.Slot), + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + return nil +} + +func collectNotifications(epoch uint64, mc 
modules.ModuleContext) (types.NotificationsPerUserId, error) { notificationsByUserID := types.NotificationsPerUserId{} start := time.Now() var err error @@ -185,126 +308,52 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } log.Infof("started collecting notifications") - - log.Infof("retrieving dashboard definitions") - // Retrieve all dashboard definitions to be able to retrieve validators included in - // the group notification subscriptions - // TODO: add a filter to retrieve only groups that have notifications enabled - // Needs a new field in the db - dashboardConfigRetrievalStartTs := time.Now() - type dashboardDefinitionRow struct { - DashboardId types.DashboardId `db:"dashboard_id"` - DashboardName string `db:"dashboard_name"` - UserId types.UserId `db:"user_id"` - GroupId types.DashboardGroupId `db:"group_id"` - GroupName string `db:"group_name"` - ValidatorIndex types.ValidatorIndex `db:"validator_index"` - WebhookTarget string `db:"webhook_target"` - WebhookFormat string `db:"webhook_format"` - } - var dashboardDefinitions []dashboardDefinitionRow - err = db.AlloyWriter.Select(&dashboardDefinitions, ` - SELECT - users_val_dashboards.id as dashboard_id, - users_val_dashboards.name as dashboard_name, - users_val_dashboards.user_id, - users_val_dashboards_groups.id as group_id, - users_val_dashboards_groups.name as group_name, - users_val_dashboards_validators.validator_index, - COALESCE(users_val_dashboards_groups.webhook_target, '') AS webhook_target, - COALESCE(users_val_dashboards_groups.webhook_format, '') AS webhook_format - FROM users_val_dashboards - LEFT JOIN users_val_dashboards_groups ON users_val_dashboards_groups.dashboard_id = users_val_dashboards.id - LEFT JOIN users_val_dashboards_validators ON users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id - WHERE 
users_val_dashboards_validators.validator_index IS NOT NULL; - `) - if err != nil { - return nil, fmt.Errorf("error getting dashboard definitions: %v", err) - } - - // Now initialize the validator dashboard configuration map - validatorDashboardConfig := &types.ValidatorDashboardConfig{ - DashboardsById: make(map[types.DashboardId]*types.ValidatorDashboard), - RocketpoolNodeByPubkey: make(map[string]string), - } - for _, row := range dashboardDefinitions { - if validatorDashboardConfig.DashboardsById[row.DashboardId] == nil { - validatorDashboardConfig.DashboardsById[row.DashboardId] = &types.ValidatorDashboard{ - Name: row.DashboardName, - Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), - } - } - if validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId] == nil { - validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ - Name: row.GroupName, - Validators: []uint64{}, - } - } - validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) - } - - log.Infof("retrieving dashboard definitions took: %v", time.Since(dashboardConfigRetrievalStartTs)) - - // Now collect the mapping of rocketpool node addresses to validator pubkeys - // This is needed for the rocketpool notifications - type rocketpoolNodeRow struct { - Pubkey []byte `db:"pubkey"` - NodeAddress []byte `db:"node_address"` - } - - var rocketpoolNodes []rocketpoolNodeRow - err = db.AlloyWriter.Select(&rocketpoolNodes, ` - SELECT - pubkey, - node_address - FROM rocketpool_minipools;`) - if err != nil { - return nil, fmt.Errorf("error getting rocketpool node addresses: %v", err) - } - - for _, row := range rocketpoolNodes { - validatorDashboardConfig.RocketpoolNodeByPubkey[hex.EncodeToString(row.Pubkey)] = hex.EncodeToString(row.NodeAddress) - } - // The 
following functions will collect the notifications and add them to the // notificationsByUserID map. The notifications will be queued and sent later // by the notification sender process - err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch, validatorDashboardConfig) + err = collectGroupEfficiencyNotifications(notificationsByUserID, epoch, mc) + if err != nil { + metrics.Errors.WithLabelValues("notifications_collect_group_efficiency").Inc() + return nil, fmt.Errorf("error collecting validator_group_efficiency notifications: %v", err) + } + log.Infof("collecting group efficiency notifications took: %v", time.Since(start)) + + err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() return nil, fmt.Errorf("error collecting validator_attestation_missed notifications: %v", err) } log.Infof("collecting attestation & offline notifications took: %v", time.Since(start)) - err = collectBlockProposalNotifications(notificationsByUserID, 1, types.ValidatorExecutedProposalEventName, epoch, validatorDashboardConfig) + err = collectBlockProposalNotifications(notificationsByUserID, 1, types.ValidatorExecutedProposalEventName, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_executed_block_proposal").Inc() return nil, fmt.Errorf("error collecting validator_proposal_submitted notifications: %v", err) } log.Infof("collecting block proposal proposed notifications took: %v", time.Since(start)) - err = collectBlockProposalNotifications(notificationsByUserID, 2, types.ValidatorMissedProposalEventName, epoch, validatorDashboardConfig) + err = collectBlockProposalNotifications(notificationsByUserID, 2, types.ValidatorMissedProposalEventName, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_block_proposal").Inc() return nil, fmt.Errorf("error collecting 
validator_proposal_missed notifications: %v", err) } log.Infof("collecting block proposal missed notifications took: %v", time.Since(start)) - err = collectBlockProposalNotifications(notificationsByUserID, 3, types.ValidatorMissedProposalEventName, epoch, validatorDashboardConfig) + err = collectBlockProposalNotifications(notificationsByUserID, 3, types.ValidatorMissedProposalEventName, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_orphaned_block_proposal").Inc() return nil, fmt.Errorf("error collecting validator_proposal_missed notifications for orphaned slots: %w", err) } log.Infof("collecting block proposal missed notifications for orphaned slots took: %v", time.Since(start)) - err = collectValidatorGotSlashedNotifications(notificationsByUserID, epoch, validatorDashboardConfig) + err = collectValidatorGotSlashedNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_validator_got_slashed").Inc() return nil, fmt.Errorf("error collecting validator_got_slashed notifications: %v", err) } log.Infof("collecting validator got slashed notifications took: %v", time.Since(start)) - err = collectWithdrawalNotifications(notificationsByUserID, epoch, validatorDashboardConfig) + err = collectWithdrawalNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_validator_withdrawal").Inc() return nil, fmt.Errorf("error collecting withdrawal notifications: %v", err) @@ -331,7 +380,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { return nil, fmt.Errorf("error collecting rocketpool notifications: %v", err) } } else { - err = collectRocketpoolComissionNotifications(notificationsByUserID, validatorDashboardConfig) + err = collectRocketpoolComissionNotifications(notificationsByUserID) if err != nil { //nolint:misspell metrics.Errors.WithLabelValues("notifications_collect_rocketpool_comission").Inc() 
@@ -339,21 +388,21 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } log.Infof("collecting rocketpool commissions took: %v", time.Since(start)) - err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID, validatorDashboardConfig) + err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_rocketpool_reward_claim").Inc() return nil, fmt.Errorf("error collecting new rocketpool claim round: %v", err) } log.Infof("collecting rocketpool claim round took: %v", time.Since(start)) - err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMaxReachedEventName, epoch, validatorDashboardConfig) + err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMaxReachedEventName, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_rocketpool_rpl_collateral_max_reached").Inc() return nil, fmt.Errorf("error collecting rocketpool max collateral: %v", err) } log.Infof("collecting rocketpool max collateral took: %v", time.Since(start)) - err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMinReachedEventName, epoch, validatorDashboardConfig) + err = collectRocketpoolRPLCollateralNotifications(notificationsByUserID, types.RocketpoolCollateralMinReachedEventName, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_rocketpool_rpl_collateral_min_reached").Inc() return nil, fmt.Errorf("error collecting rocketpool min collateral: %v", err) @@ -362,7 +411,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } } - err = collectSyncCommitteeNotifications(notificationsByUserID, epoch, validatorDashboardConfig) + err = collectSyncCommitteeNotifications(notificationsByUserID, epoch) if err != nil { 
metrics.Errors.WithLabelValues("notifications_collect_sync_committee").Inc() return nil, fmt.Errorf("error collecting sync committee: %v", err) @@ -421,7 +470,264 @@ func collectUserDbNotifications(epoch uint64) (types.NotificationsPerUserId, err return notificationsByUserID, nil } -func collectBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, status uint64, eventName types.EventName, epoch uint64, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectGroupEfficiencyNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64, mc modules.ModuleContext) error { + type dbResult struct { + ValidatorIndex uint64 `db:"validator_index"` + AttestationReward decimal.Decimal `db:"attestations_reward"` + AttestationIdealReward decimal.Decimal `db:"attestations_ideal_reward"` + BlocksProposed uint64 `db:"blocks_proposed"` + BlocksScheduled uint64 `db:"blocks_scheduled"` + SyncExecuted uint64 `db:"sync_executed"` + SyncScheduled uint64 `db:"sync_scheduled"` + } + + // retrieve rewards for the epoch + log.Info("retrieving validator metadata") + validators, err := mc.CL.GetValidators(epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch, nil, []constypes.ValidatorStatus{constypes.Active}) + if err != nil { + return fmt.Errorf("error getting validators: %w", err) + } + effectiveBalanceMap := make(map[uint64]uint64) + activeValidatorsMap := make(map[uint64]struct{}) + for _, validator := range validators.Data { + effectiveBalanceMap[validator.Index] = validator.Validator.EffectiveBalance + activeValidatorsMap[validator.Index] = struct{}{} + } + log.Info("retrieving attestation reward data") + attestationRewards, err := mc.CL.GetAttestationRewards(epoch) + if err != nil { + return fmt.Errorf("error getting attestation rewards: %w", err) + } + + efficiencyMap := make(map[types.ValidatorIndex]*dbResult, len(attestationRewards.Data.TotalRewards)) + + idealRewardsMap := make(map[uint64]decimal.Decimal) + for _, reward := 
range attestationRewards.Data.IdealRewards { + idealRewardsMap[uint64(reward.EffectiveBalance)] = decimal.NewFromInt(int64(reward.Head) + int64(reward.Target) + int64(reward.Source) + int64(reward.InclusionDelay) + int64(reward.Inactivity)) + } + for _, reward := range attestationRewards.Data.TotalRewards { + efficiencyMap[types.ValidatorIndex(reward.ValidatorIndex)] = &dbResult{ + ValidatorIndex: reward.ValidatorIndex, + AttestationReward: decimal.NewFromInt(int64(reward.Head) + int64(reward.Target) + int64(reward.Source) + int64(reward.InclusionDelay) + int64(reward.Inactivity)), + AttestationIdealReward: idealRewardsMap[effectiveBalanceMap[reward.ValidatorIndex]], + } + } + + log.Info("retrieving block proposal data") + proposalAssignments, err := mc.CL.GetPropoalAssignments(epoch) + if err != nil { + return fmt.Errorf("error getting proposal assignments: %w", err) + } + for _, assignment := range proposalAssignments.Data { + efficiencyMap[types.ValidatorIndex(assignment.ValidatorIndex)].BlocksScheduled++ + } + + syncAssignments, err := mc.CL.GetSyncCommitteesAssignments(nil, epoch*utils.Config.Chain.ClConfig.SlotsPerEpoch) + if err != nil { + return fmt.Errorf("error getting sync committee assignments: %w", err) + } + + for slot := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch; slot < (epoch+1)*utils.Config.Chain.ClConfig.SlotsPerEpoch; slot++ { + log.Infof("retrieving data for slot %v", slot) + s, err := mc.CL.GetSlot(slot) + if err != nil && strings.Contains(err.Error(), "NOT_FOUND") { + continue + } else if err != nil { + return fmt.Errorf("error getting block header for slot %v: %w", slot, err) + } + efficiencyMap[types.ValidatorIndex(s.Data.Message.ProposerIndex)].BlocksProposed++ + + for i, validatorIndex := range syncAssignments.Data.Validators { + efficiencyMap[types.ValidatorIndex(validatorIndex)].SyncScheduled++ + + if utils.BitAtVector(s.Data.Message.Body.SyncAggregate.SyncCommitteeBits, i) { + 
efficiencyMap[types.ValidatorIndex(validatorIndex)].SyncExecuted++ + } + } + } + + subMap, err := GetSubsForEventFilter(types.ValidatorGroupEfficiencyEventName, "", nil, nil) + if err != nil { + return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) + } + + // create a lookup map for the dashboard & groups + type groupDetails struct { + Validators []types.ValidatorIndex + Subscription *types.Subscription + } + dashboardMap := make(map[types.UserId]map[types.DashboardId]map[types.DashboardGroupId]*groupDetails) + + for _, subs := range subMap { + for _, sub := range subs { + if sub.DashboardId == nil || sub.DashboardGroupId == nil { + continue + } + userId := *sub.UserID + dashboardId := types.DashboardId(*sub.DashboardId) + groupId := types.DashboardGroupId(*sub.DashboardGroupId) + if _, ok := dashboardMap[userId]; !ok { + dashboardMap[userId] = make(map[types.DashboardId]map[types.DashboardGroupId]*groupDetails) + } + if _, ok := dashboardMap[userId][dashboardId]; !ok { + dashboardMap[userId][dashboardId] = make(map[types.DashboardGroupId]*groupDetails) + } + if _, ok := dashboardMap[userId][dashboardId][groupId]; !ok { + dashboardMap[userId][dashboardId][groupId] = &groupDetails{ + Validators: []types.ValidatorIndex{}, + } + } + if sub.EventFilter != "" { + pubkeyDecoded, err := hex.DecodeString(sub.EventFilter) + if err != nil { + return fmt.Errorf("error decoding pubkey %v: %w", sub.EventFilter, err) + } + validatorIndex, err := GetIndexForPubkey(pubkeyDecoded) + if err != nil { + return fmt.Errorf("error getting validator index for pubkey %v: %w", sub.EventFilter, err) + } + dashboardMap[userId][dashboardId][groupId].Validators = append(dashboardMap[*sub.UserID][dashboardId][groupId].Validators, types.ValidatorIndex(validatorIndex)) + } + dashboardMap[userId][dashboardId][groupId].Subscription = sub + } + } + + // The commented code below can be used to validate data retrieved from the node against + // data in clickhouse + // 
var queryResult []*dbResult + // clickhouseTable := "validator_dashboard_data_epoch" + // // retrieve efficiency data for the epoch + // log.Infof("retrieving efficiency data for epoch %v", epoch) + // ds := goqu.Dialect("postgres"). + // From(goqu.L(fmt.Sprintf(`%s AS r`, clickhouseTable))). + // Select( + // goqu.L("validator_index"), + // goqu.L("COALESCE(r.attestations_reward, 0) AS attestations_reward"), + // goqu.L("COALESCE(r.attestations_ideal_reward, 0) AS attestations_ideal_reward"), + // goqu.L("COALESCE(r.blocks_proposed, 0) AS blocks_proposed"), + // goqu.L("COALESCE(r.blocks_scheduled, 0) AS blocks_scheduled"), + // goqu.L("COALESCE(r.sync_executed, 0) AS sync_executed"), + // goqu.L("COALESCE(r.sync_scheduled, 0) AS sync_scheduled")). + // Where(goqu.L("r.epoch_timestamp = ?", utils.EpochToTime(epoch))) + // query, args, err := ds.Prepared(true).ToSQL() + // if err != nil { + // return fmt.Errorf("error preparing query: %v", err) + // } + + // err = db.ClickHouseReader.Select(&queryResult, query, args...) 
+ // if err != nil { + // return fmt.Errorf("error retrieving data from table %s: %v", clickhouseTable, err) + // } + + // if len(queryResult) == 0 { + // return fmt.Errorf("no efficiency data found for epoch %v", epoch) + // } + + // log.Infof("retrieved %v efficiency data rows", len(queryResult)) + + // for _, row := range queryResult { + // if _, ok := activeValidatorsMap[row.ValidatorIndex]; !ok { + // continue + // } + // existing := efficiencyMap[types.ValidatorIndex(row.ValidatorIndex)] + + // if existing == nil { + // existing = &dbResult{ + // ValidatorIndex: row.ValidatorIndex, + // AttestationReward: decimal.Decimal{}, + // AttestationIdealReward: decimal.Decimal{}, + // } + // } + // if !existing.AttestationIdealReward.Equal(row.AttestationIdealReward) { + // log.Fatal(fmt.Errorf("ideal reward mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.AttestationIdealReward, row.AttestationIdealReward), "ideal reward mismatch", 0) + // } + // if !existing.AttestationReward.Equal(row.AttestationReward) { + // log.Fatal(fmt.Errorf("attestation reward mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.AttestationReward, row.AttestationReward), "attestation reward mismatch", 0) + // } + // if existing.BlocksProposed != row.BlocksProposed { + // log.Fatal(fmt.Errorf("blocks proposed mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.BlocksProposed, row.BlocksProposed), "blocks proposed mismatch", 0) + // } + // if existing.BlocksScheduled != row.BlocksScheduled { + // log.Fatal(fmt.Errorf("blocks scheduled mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.BlocksScheduled, row.BlocksScheduled), "blocks scheduled mismatch", 0) + // } + // if existing.SyncExecuted != row.SyncExecuted { + // log.Fatal(fmt.Errorf("sync executed mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.SyncExecuted, row.SyncExecuted), "sync executed mismatch", 0) + // } + // if existing.SyncScheduled != 
row.SyncScheduled { + // log.Fatal(fmt.Errorf("sync scheduled mismatch for validator %v: %v != %v", row.ValidatorIndex, existing.SyncScheduled, row.SyncScheduled), "sync scheduled mismatch", 0) + // } + // efficiencyMap[types.ValidatorIndex(row.ValidatorIndex)] = row + // } + + for userId, dashboards := range dashboardMap { + for dashboardId, groups := range dashboards { + for groupId, groupDetails := range groups { + attestationReward := decimal.Decimal{} + attestationIdealReward := decimal.Decimal{} + blocksProposed := uint64(0) + blocksScheduled := uint64(0) + syncExecuted := uint64(0) + syncScheduled := uint64(0) + + for _, validatorIndex := range groupDetails.Validators { + if row, ok := efficiencyMap[validatorIndex]; ok { + attestationReward = attestationReward.Add(row.AttestationReward) + attestationIdealReward = attestationIdealReward.Add(row.AttestationIdealReward) + blocksProposed += row.BlocksProposed + blocksScheduled += row.BlocksScheduled + syncExecuted += row.SyncExecuted + syncScheduled += row.SyncScheduled + } + } + + var attestationEfficiency, proposerEfficiency, syncEfficiency sql.NullFloat64 + + if !attestationIdealReward.IsZero() { + attestationEfficiency.Float64 = attestationReward.Div(attestationIdealReward).InexactFloat64() + attestationEfficiency.Valid = true + } + if blocksScheduled > 0 { + proposerEfficiency.Float64 = float64(blocksProposed) / float64(blocksScheduled) + proposerEfficiency.Valid = true + } + if syncScheduled > 0 { + syncEfficiency.Float64 = float64(syncExecuted) / float64(syncScheduled) + syncEfficiency.Valid = true + } + + efficiency := utils.CalculateTotalEfficiency(attestationEfficiency, proposerEfficiency, syncEfficiency) + + log.Infof("efficiency: %v, threshold: %v", efficiency, groupDetails.Subscription.EventThreshold*100) + + if efficiency < groupDetails.Subscription.EventThreshold*100 { + log.Infof("creating group efficiency notification for user %v, dashboard %v, group %v in epoch %v", userId, dashboardId, 
groupId, epoch) + n := &ValidatorGroupEfficiencyNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *groupDetails.Subscription.ID, + UserID: *groupDetails.Subscription.UserID, + Epoch: epoch, + EventName: groupDetails.Subscription.EventName, + EventFilter: "-", + DashboardId: groupDetails.Subscription.DashboardId, + DashboardName: groupDetails.Subscription.DashboardName, + DashboardGroupId: groupDetails.Subscription.DashboardGroupId, + DashboardGroupName: groupDetails.Subscription.DashboardGroupName, + }, + Threshold: groupDetails.Subscription.EventThreshold * 100, + Efficiency: efficiency, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } + } + } + } + + log.Info("done collecting group efficiency notifications") + + return nil +} +func collectBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, status uint64, eventName types.EventName, epoch uint64) error { type dbResult struct { Proposer uint64 `db:"proposer"` Status uint64 `db:"status"` @@ -430,7 +736,7 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications ExecRewardETH float64 } - subMap, err := GetSubsForEventFilter(eventName, "", nil, nil, validatorDashboardConfig) + subMap, err := GetSubsForEventFilter(eventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) } @@ -529,9 +835,9 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications } // collectAttestationAndOfflineValidatorNotifications collects notifications for missed attestations and offline validators -func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID 
types.NotificationsPerUserId, epoch uint64) error { // Retrieve subscriptions for missed attestations - subMapAttestationMissed, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName, "", nil, nil, validatorDashboardConfig) + subMapAttestationMissed, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) } @@ -703,7 +1009,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty return fmt.Errorf("retrieved more than %v online validators notifications: %v, exiting", onlineValidatorsLimit, len(onlineValidators)) } - subMapOnlineOffline, err := GetSubsForEventFilter(types.ValidatorIsOfflineEventName, "", nil, nil, validatorDashboardConfig) + subMapOnlineOffline, err := GetSubsForEventFilter(types.ValidatorIsOfflineEventName, "", nil, nil) if err != nil { return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorIsOfflineEventName, err) } @@ -731,7 +1037,6 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty DashboardGroupName: sub.DashboardGroupName, }, ValidatorIndex: validator.Index, - IsOffline: true, } notificationsByUserID.AddNotification(n) @@ -749,12 +1054,12 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) - n := &ValidatorIsOfflineNotification{ + n := &ValidatorIsOnlineNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ SubscriptionID: *sub.ID, UserID: *sub.UserID, Epoch: epoch, - EventName: sub.EventName, + EventName: types.ValidatorIsOnlineEventName, EventFilter: hex.EncodeToString(validator.Pubkey), LatestState: "-", DashboardId: sub.DashboardId, @@ -763,7 +1068,6 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty DashboardGroupName: sub.DashboardGroupName, }, 
ValidatorIndex: validator.Index, - IsOffline: false, } notificationsByUserID.AddNotification(n) @@ -843,7 +1147,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty return nil } -func collectValidatorGotSlashedNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectValidatorGotSlashedNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { dbResult, err := db.GetValidatorsGotSlashed(epoch) if err != nil { return fmt.Errorf("error getting slashed validators from database, err: %w", err) @@ -854,7 +1158,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific pubkeyToSlashingInfoMap[pubkeyStr] = event } - subscribedUsers, err := GetSubsForEventFilter(types.ValidatorGotSlashedEventName, "", nil, nil, validatorDashboardConfig) + subscribedUsers, err := GetSubsForEventFilter(types.ValidatorGotSlashedEventName, "", nil, nil) if err != nil { return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorGotSlashedEventName, err) } @@ -893,9 +1197,9 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific } // collectWithdrawalNotifications collects all notifications validator withdrawals -func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) - subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName, "", nil, nil, validatorDashboardConfig) + subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting 
subscriptions for missed attestations %w", err) } @@ -906,7 +1210,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer return fmt.Errorf("error getting withdrawals from database, err: %w", err) } - // log.Infof("retrieved %v events", len(events)) + log.Infof("retrieved %v events", len(events)) for _, event := range events { subscribers, ok := subMap[hex.EncodeToString(event.Pubkey)] if ok { @@ -920,7 +1224,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer continue } } - // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) + log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) n := &ValidatorWithdrawalNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ SubscriptionID: *sub.ID, @@ -931,6 +1235,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer DashboardName: sub.DashboardName, DashboardGroupId: sub.DashboardGroupId, DashboardGroupName: sub.DashboardGroupName, + Epoch: epoch, }, ValidatorIndex: event.ValidatorIndex, Epoch: epoch, @@ -966,8 +1271,7 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU types.EthClientUpdateEventName, "((last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) > last_sent_ts) OR last_sent_ts IS NULL)", []interface{}{client.Date.Unix()}, - []string{strings.ToLower(client.Name)}, - nil) + []string{strings.ToLower(client.Name)}) if err != nil { return err } @@ -1086,7 +1390,6 @@ func collectMonitoringMachine( "(created_epoch <= ? AND (last_sent_epoch < ? OR last_sent_epoch IS NULL))", []interface{}{epoch, int64(epoch) - int64(epochWaitInBetween)}, nil, - nil, ) // TODO: clarify why we need grouping here?! 
@@ -1134,8 +1437,8 @@ func collectMonitoringMachine( } //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) - if notifyConditionFulfilled(&localData, currentMachineData) { - result = append(result, &localData) + if notifyConditionFulfilled(localData, currentMachineData) { + result = append(result, localData) } } } @@ -1234,7 +1537,6 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif "(last_sent_ts < ? OR (last_sent_ts IS NULL AND created_ts < ?))", []interface{}{firstDayOfMonth, firstDayOfMonth}, nil, - nil, ) if err != nil { return err @@ -1286,7 +1588,6 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse "(last_sent_ts <= NOW() - INTERVAL '1 hour' OR last_sent_ts IS NULL)", nil, nil, - nil, ) if err != nil { return err @@ -1317,7 +1618,7 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse return nil } -func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId) error { fee := 0.0 err := db.WriterDb.Get(&fee, ` select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; @@ -1340,7 +1641,6 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific "(last_sent_ts <= NOW() - INTERVAL '8 hours' OR last_sent_ts IS NULL) AND (event_threshold <= ? 
OR (event_threshold < 0 AND event_threshold * -1 >= ?))", []interface{}{fee, fee}, nil, - validatorDashboardConfig, ) if err != nil { return err @@ -1372,7 +1672,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific return nil } -func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId) error { var ts int64 err := db.WriterDb.Get(&ts, ` select date_part('epoch', claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; @@ -1397,7 +1697,6 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. "(last_sent_ts <= NOW() - INTERVAL '5 hours' OR last_sent_ts IS NULL)", nil, nil, - validatorDashboardConfig, ) if err != nil { return err @@ -1428,13 +1727,12 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. 
return nil } -func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { subMap, err := GetSubsForEventFilter( eventName, "(last_sent_ts <= NOW() - INTERVAL '24 hours' OR last_sent_ts IS NULL)", // send out this notification type only once per day nil, - nil, - validatorDashboardConfig) + nil) if err != nil { return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) } @@ -1570,7 +1868,7 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.Not return nil } -func collectSyncCommitteeNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64, validatorDashboardConfig *types.ValidatorDashboardConfig) error { +func collectSyncCommitteeNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee nextPeriod := currentPeriod + 1 @@ -1594,7 +1892,7 @@ func collectSyncCommitteeNotifications(notificationsByUserID types.Notifications mapping[val.PubKey] = val.Index } - dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoonEventName, "(last_sent_ts <= NOW() - INTERVAL '26 hours' OR last_sent_ts IS NULL)", nil, nil, validatorDashboardConfig) + dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoonEventName, "(last_sent_ts <= NOW() - INTERVAL '26 hours' OR last_sent_ts IS NULL)", nil, nil) if err != nil { return err diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 81b58bf16..56c216fb5 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -5,6 +5,7 @@ import ( "fmt" 
"strconv" "strings" + "time" "github.com/doug-martin/goqu/v9" "github.com/gobitfly/beaconchain/pkg/commons/db" @@ -21,8 +22,8 @@ import ( // or a machine name for machine notifications or a eth client name for ethereum client update notifications // optionally it is possible to set a filter on the last sent ts and the event filter // fields -func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, lastSentFilterArgs []interface{}, eventFilters []string, validatorDashboardConfig *types.ValidatorDashboardConfig) (map[string][]types.Subscription, error) { - var subs []types.Subscription +func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, lastSentFilterArgs []interface{}, eventFilters []string) (map[string][]*types.Subscription, error) { + var subs []*types.Subscription // subQuery := ` // SELECT @@ -53,7 +54,9 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las goqu.C("event_name"), ).Join(goqu.T("users"), goqu.On(goqu.T("users").Col("id").Eq(goqu.T("users_subscriptions").Col("user_id")))). Where(goqu.L("(event_name = ? AND user_id <> 0)", eventNameForQuery)). - Where(goqu.L("(users.notifications_do_not_disturb_ts IS NULL OR users.notifications_do_not_disturb_ts < NOW())")) + Where(goqu.L("(users.notifications_do_not_disturb_ts IS NULL OR users.notifications_do_not_disturb_ts < NOW())")). + // filter out users that have all notification channels disabled + Where(goqu.L("(select bool_or(active) from users_notification_channels where users_notification_channels.user_id = users_subscriptions.user_id)")) if lastSentFilter != "" { if len(lastSentFilterArgs) > 0 { @@ -71,7 +74,7 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las return nil, err } - subMap := make(map[string][]types.Subscription, 0) + subMap := make(map[string][]*types.Subscription, 0) err = db.FrontendWriterDB.Select(&subs, query, args...) 
if err != nil { return nil, err @@ -79,7 +82,10 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las log.Infof("found %d subscriptions for event %s", len(subs), eventName) + dashboardConfigsToFetch := make([]types.DashboardId, 0) for _, sub := range subs { + // sub.LastEpoch = &zero + // sub.LastSent = &time.Time{} sub.EventName = types.EventName(strings.Replace(string(sub.EventName), utils.GetNetwork()+":", "", 1)) // remove the network name from the event name if strings.HasPrefix(sub.EventFilter, "vdb:") { dashboardData := strings.Split(sub.EventFilter, ":") @@ -100,42 +106,158 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las continue } sub.DashboardGroupId = &dashboardGroupId - if dashboard, ok := validatorDashboardConfig.DashboardsById[types.DashboardId(dashboardId)]; ok { - if dashboard.Name == "" { - dashboard.Name = fmt.Sprintf("Dashboard %d", dashboardId) + + dashboardConfigsToFetch = append(dashboardConfigsToFetch, types.DashboardId(dashboardId)) + } else { + if _, ok := subMap[sub.EventFilter]; !ok { + subMap[sub.EventFilter] = make([]*types.Subscription, 0) + } + subMap[sub.EventFilter] = append(subMap[sub.EventFilter], sub) + } + } + + if len(dashboardConfigsToFetch) > 0 { + log.Infof("fetching dashboard configurations for %d dashboards (%v)", len(dashboardConfigsToFetch), dashboardConfigsToFetch) + dashboardConfigRetrievalStartTs := time.Now() + type dashboardDefinitionRow struct { + DashboardId types.DashboardId `db:"dashboard_id"` + DashboardName string `db:"dashboard_name"` + UserId types.UserId `db:"user_id"` + GroupId types.DashboardGroupId `db:"group_id"` + GroupName string `db:"group_name"` + ValidatorIndex types.ValidatorIndex `db:"validator_index"` + WebhookTarget string `db:"webhook_target"` + WebhookFormat string `db:"webhook_format"` + } + var dashboardDefinitions []dashboardDefinitionRow + err = db.AlloyWriter.Select(&dashboardDefinitions, ` + SELECT + 
users_val_dashboards.id as dashboard_id, + users_val_dashboards.name as dashboard_name, + users_val_dashboards.user_id, + users_val_dashboards_groups.id as group_id, + users_val_dashboards_groups.name as group_name, + users_val_dashboards_validators.validator_index, + COALESCE(users_val_dashboards_groups.webhook_target, '') AS webhook_target, + COALESCE(users_val_dashboards_groups.webhook_format, '') AS webhook_format + FROM users_val_dashboards + LEFT JOIN users_val_dashboards_groups ON users_val_dashboards_groups.dashboard_id = users_val_dashboards.id + LEFT JOIN users_val_dashboards_validators ON users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id + WHERE users_val_dashboards_validators.validator_index IS NOT NULL AND users_val_dashboards.id = ANY($1) + `, pq.Array(dashboardConfigsToFetch)) + if err != nil { + return nil, fmt.Errorf("error getting dashboard definitions: %v", err) + } + log.Infof("retrieved %d dashboard definitions", len(dashboardDefinitions)) + + // Now initialize the validator dashboard configuration map + validatorDashboardConfig := &types.ValidatorDashboardConfig{ + DashboardsById: make(map[types.DashboardId]*types.ValidatorDashboard), + RocketpoolNodeByPubkey: make(map[string]string), + } + for _, row := range dashboardDefinitions { + if validatorDashboardConfig.DashboardsById[row.DashboardId] == nil { + validatorDashboardConfig.DashboardsById[row.DashboardId] = &types.ValidatorDashboard{ + Name: row.DashboardName, + Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), } - if group, ok := dashboard.Groups[types.DashboardGroupId(dashboardGroupId)]; ok { - if group.Name == "" { - group.Name = "default" - } + } + if validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId] == nil { + validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ + 
Name: row.GroupName, + Validators: []uint64{}, + } + } + validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsById[row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) + } - uniqueRPLNodes := make(map[string]struct{}) + log.Infof("retrieving dashboard definitions took: %v", time.Since(dashboardConfigRetrievalStartTs)) + + // Now collect the mapping of rocketpool node addresses to validator pubkeys + // This is needed for the rocketpool notifications + type rocketpoolNodeRow struct { + Pubkey []byte `db:"pubkey"` + NodeAddress []byte `db:"node_address"` + } - for _, validatorIndex := range group.Validators { - validatorEventFilterRaw, err := GetPubkeyForIndex(validatorIndex) - if err != nil { - log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validator": validatorIndex}) - continue + var rocketpoolNodes []rocketpoolNodeRow + err = db.AlloyWriter.Select(&rocketpoolNodes, ` + SELECT + pubkey, + node_address + FROM rocketpool_minipools;`) + if err != nil { + return nil, fmt.Errorf("error getting rocketpool node addresses: %v", err) + } + + for _, row := range rocketpoolNodes { + validatorDashboardConfig.RocketpoolNodeByPubkey[hex.EncodeToString(row.Pubkey)] = hex.EncodeToString(row.NodeAddress) + } + + //log.Infof("retrieved %d rocketpool node addresses", len(rocketpoolNodes)) + + for _, sub := range subs { + if strings.HasPrefix(sub.EventFilter, "vdb:") { + //log.Infof("hydrating subscription for dashboard %d and group %d for user %d", *sub.DashboardId, *sub.DashboardGroupId, *sub.UserID) + if dashboard, ok := validatorDashboardConfig.DashboardsById[types.DashboardId(*sub.DashboardId)]; ok { + if dashboard.Name == "" { + dashboard.Name = fmt.Sprintf("Dashboard %d", *sub.DashboardId) + } + if group, ok := dashboard.Groups[types.DashboardGroupId(*sub.DashboardGroupId)]; ok { + if group.Name == "" { + group.Name = "default" } - 
validatorEventFilter := hex.EncodeToString(validatorEventFilterRaw) - - if eventName == types.RocketpoolCollateralMaxReachedEventName || eventName == types.RocketpoolCollateralMinReachedEventName { - // Those two RPL notifications are not tied to a specific validator but to a node address, create a subscription for each - // node in the group - nodeAddress, ok := validatorDashboardConfig.RocketpoolNodeByPubkey[validatorEventFilter] - if !ok { - // Validator is not a rocketpool minipool + + uniqueRPLNodes := make(map[string]struct{}) + + for _, validatorIndex := range group.Validators { + validatorEventFilterRaw, err := GetPubkeyForIndex(validatorIndex) + if err != nil { + log.Error(err, "error retrieving pubkey for validator", 0, map[string]interface{}{"validator": validatorIndex}) continue } - if _, ok := uniqueRPLNodes[nodeAddress]; !ok { - if _, ok := subMap[nodeAddress]; !ok { - subMap[nodeAddress] = make([]types.Subscription, 0) + validatorEventFilter := hex.EncodeToString(validatorEventFilterRaw) + + if eventName == types.RocketpoolCollateralMaxReachedEventName || eventName == types.RocketpoolCollateralMinReachedEventName { + // Those two RPL notifications are not tied to a specific validator but to a node address, create a subscription for each + // node in the group + nodeAddress, ok := validatorDashboardConfig.RocketpoolNodeByPubkey[validatorEventFilter] + if !ok { + // Validator is not a rocketpool minipool + continue } - hydratedSub := types.Subscription{ + if _, ok := uniqueRPLNodes[nodeAddress]; !ok { + if _, ok := subMap[nodeAddress]; !ok { + subMap[nodeAddress] = make([]*types.Subscription, 0) + } + hydratedSub := &types.Subscription{ + ID: sub.ID, + UserID: sub.UserID, + EventName: sub.EventName, + EventFilter: nodeAddress, + LastSent: sub.LastSent, + LastEpoch: sub.LastEpoch, + CreatedTime: sub.CreatedTime, + CreatedEpoch: sub.CreatedEpoch, + EventThreshold: sub.EventThreshold, + DashboardId: sub.DashboardId, + DashboardName: dashboard.Name, + 
DashboardGroupId: sub.DashboardGroupId, + DashboardGroupName: group.Name, + } + subMap[nodeAddress] = append(subMap[nodeAddress], hydratedSub) + //log.Infof("hydrated subscription for validator %v of dashboard %d and group %d for user %d", hydratedSub.EventFilter, *hydratedSub.DashboardId, *hydratedSub.DashboardGroupId, *hydratedSub.UserID) + } + uniqueRPLNodes[nodeAddress] = struct{}{} + } else { + if _, ok := subMap[validatorEventFilter]; !ok { + subMap[validatorEventFilter] = make([]*types.Subscription, 0) + } + hydratedSub := &types.Subscription{ ID: sub.ID, UserID: sub.UserID, EventName: sub.EventName, - EventFilter: nodeAddress, + EventFilter: validatorEventFilter, LastSent: sub.LastSent, LastEpoch: sub.LastEpoch, CreatedTime: sub.CreatedTime, @@ -146,41 +268,15 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las DashboardGroupId: sub.DashboardGroupId, DashboardGroupName: group.Name, } - subMap[nodeAddress] = append(subMap[nodeAddress], hydratedSub) - } - uniqueRPLNodes[nodeAddress] = struct{}{} - } else { - if _, ok := subMap[validatorEventFilter]; !ok { - subMap[validatorEventFilter] = make([]types.Subscription, 0) + subMap[validatorEventFilter] = append(subMap[validatorEventFilter], hydratedSub) + //log.Infof("hydrated subscription for validator %v of dashboard %d and group %d for user %d", hydratedSub.EventFilter, *hydratedSub.DashboardId, *hydratedSub.DashboardGroupId, *hydratedSub.UserID) } - hydratedSub := types.Subscription{ - ID: sub.ID, - UserID: sub.UserID, - EventName: sub.EventName, - EventFilter: validatorEventFilter, - LastSent: sub.LastSent, - LastEpoch: sub.LastEpoch, - CreatedTime: sub.CreatedTime, - CreatedEpoch: sub.CreatedEpoch, - EventThreshold: sub.EventThreshold, - DashboardId: sub.DashboardId, - DashboardName: dashboard.Name, - DashboardGroupId: sub.DashboardGroupId, - DashboardGroupName: group.Name, - } - subMap[validatorEventFilter] = append(subMap[validatorEventFilter], hydratedSub) } - - 
//log.Infof("hydrated subscription for validator %v of dashboard %d and group %d for user %d", hydratedSub.EventFilter, *hydratedSub.DashboardId, *hydratedSub.DashboardGroupId, *hydratedSub.UserID) } } } - } else { - if _, ok := subMap[sub.EventFilter]; !ok { - subMap[sub.EventFilter] = make([]types.Subscription, 0) - } - subMap[sub.EventFilter] = append(subMap[sub.EventFilter], sub) } + //log.Infof("hydrated %d subscriptions for event %s", len(subMap), eventName) } return subMap, nil diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 084bdf4f2..7bb71162a 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -3,15 +3,21 @@ package notification import ( "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/exporter/modules" ) // Used for isolated testing func GetNotificationsForEpoch(pubkeyCachePath string, epoch uint64) (types.NotificationsPerUserId, error) { - err := initPubkeyCache(pubkeyCachePath) + mc, err := modules.GetModuleContext() + if err != nil { + log.Fatal(err, "error getting module context", 0) + } + + err = initPubkeyCache(pubkeyCachePath) if err != nil { log.Fatal(err, "error initializing pubkey cache path for notifications", 0) } - return collectNotifications(epoch) + return collectNotifications(epoch, mc) } // Used for isolated testing diff --git a/backend/pkg/notification/pubkey_cache.go b/backend/pkg/notification/pubkey_cache.go index 483884cf5..37f3f5ea2 100644 --- a/backend/pkg/notification/pubkey_cache.go +++ b/backend/pkg/notification/pubkey_cache.go @@ -72,7 +72,7 @@ func GetIndexForPubkey(pubkey []byte) (uint64, error) { if err != nil { return 0, err } - log.Infof("serving index %d for validator %x from db", index, pubkey) + // log.Infof("serving index %d for validator %x from db", index, pubkey) return index, nil } else if err != nil { return 0, err 
diff --git a/backend/pkg/notification/queuing.go b/backend/pkg/notification/queuing.go index 3367b82a9..803a618c8 100644 --- a/backend/pkg/notification/queuing.go +++ b/backend/pkg/notification/queuing.go @@ -296,6 +296,8 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific bodyDetails := template.HTML("") + totalBlockReward := float64(0) + for _, event := range types.EventSortOrder { for _, notificationsPerGroup := range notificationsPerDashboard { for _, userNotifications := range notificationsPerGroup { @@ -303,10 +305,6 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific if !ok { // nothing to do for this event type continue } - - if len(bodyDetails) > 0 { - bodyDetails += "
" - } //nolint:gosec // this is a static string bodyDetails += template.HTML(fmt.Sprintf("%s
", types.EventLabel[event])) i := 0 @@ -329,6 +327,15 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific } } + if event == types.ValidatorExecutedProposalEventName { + proposalNotification, ok := n.(*ValidatorProposalNotification) + if !ok { + log.Error(fmt.Errorf("error casting proposal notification"), "", 0) + continue + } + totalBlockReward += proposalNotification.Reward + } + metrics.NotificationsQueued.WithLabelValues("email", string(event)).Inc() i++ @@ -344,20 +351,18 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific //nolint:gosec // this is a static string bodyDetails += template.HTML(fmt.Sprintf("%s
", eventInfo)) } + bodyDetails += "
" } } } //nolint:gosec // this is a static string - bodySummary := template.HTML(fmt.Sprintf("
Summary for epoch %d:
", epoch)) + bodySummary := template.HTML(fmt.Sprintf("

Summary for epoch %d:

", epoch)) for _, event := range types.EventSortOrder { count, ok := notificationTypesMap[event] if !ok { continue } - if len(bodySummary) > 0 { - bodySummary += "
" - } plural := "" if count > 1 { plural = "s" @@ -372,13 +377,20 @@ func RenderEmailsForUserEvents(epoch uint64, notificationsByUserID types.Notific case types.EthClientUpdateEventName: //nolint:gosec // this is a static string bodySummary += template.HTML(fmt.Sprintf("%s: %d client%s", types.EventLabel[event], count, plural)) + case types.ValidatorExecutedProposalEventName: + //nolint:gosec // this is a static string + bodySummary += template.HTML(fmt.Sprintf("%s: %d validator%s, Reward: %.3f ETH", types.EventLabel[event], count, plural, totalBlockReward)) + case types.ValidatorGroupEfficiencyEventName: + //nolint:gosec // this is a static string + bodySummary += template.HTML(fmt.Sprintf("%s: %d Group%s", types.EventLabel[event], count, plural)) default: //nolint:gosec // this is a static string bodySummary += template.HTML(fmt.Sprintf("%s: %d Validator%s", types.EventLabel[event], count, plural)) } + bodySummary += "
" } msg.Body += bodySummary - msg.Body += template.HTML("

Details:
") + msg.Body += template.HTML("

Details:

") msg.Body += bodyDetails if len(uniqueNotificationTypes) > 2 { @@ -412,6 +424,9 @@ func QueueEmailNotifications(epoch uint64, notificationsByUserID types.Notificat // now batch insert the emails in one go log.Infof("queueing %v email notifications", len(emails)) + if len(emails) == 0 { + return nil + } type insertData struct { Content types.TransitEmailContent `db:"content"` } @@ -452,6 +467,7 @@ func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.N notificationTypesMap := make(map[types.EventName][]string) + totalBlockReward := float64(0) for _, event := range types.EventSortOrder { ns, ok := userNotifications[event] if !ok { // nothing to do for this event type @@ -462,6 +478,15 @@ func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.N } for _, n := range ns { notificationTypesMap[event] = append(notificationTypesMap[event], n.GetEntitiyId()) + + if event == types.ValidatorExecutedProposalEventName { + proposalNotification, ok := n.(*ValidatorProposalNotification) + if !ok { + log.Error(fmt.Errorf("error casting proposal notification"), "", 0) + continue + } + totalBlockReward += proposalNotification.Reward + } } metrics.NotificationsQueued.WithLabelValues("push", string(event)).Inc() } @@ -489,15 +514,16 @@ func RenderPushMessagesForUserEvents(epoch uint64, notificationsByUserID types.N bodySummary += fmt.Sprintf("%s: %d client%s", types.EventLabel[event], count, plural) case types.MonitoringMachineCpuLoadEventName, types.MonitoringMachineMemoryUsageEventName, types.MonitoringMachineDiskAlmostFullEventName, types.MonitoringMachineOfflineEventName: bodySummary += fmt.Sprintf("%s: %d machine%s", types.EventLabel[event], count, plural) + case types.ValidatorExecutedProposalEventName: + bodySummary += fmt.Sprintf("%s: %d validator%s, Reward: %.3f ETH", types.EventLabel[event], count, plural, totalBlockReward) + case types.ValidatorGroupEfficiencyEventName: + bodySummary += fmt.Sprintf("%s: %d group%s", 
types.EventLabel[event], count, plural) default: bodySummary += fmt.Sprintf("%s: %d validator%s", types.EventLabel[event], count, plural) } - truncated := "" - if len(events) > 3 { - truncated = ",..." - events = events[:3] + if len(events) < 3 { + bodySummary += fmt.Sprintf(" (%s)", strings.Join(events, ",")) } - bodySummary += fmt.Sprintf(" (%s%s)", strings.Join(events, ","), truncated) } if len(bodySummary) > 1000 { // cap the notification body to 1000 characters (firebase limit) @@ -540,6 +566,9 @@ func QueuePushNotification(epoch uint64, notificationsByUserID types.Notificatio // now batch insert the push messages in one go log.Infof("queueing %v push notifications", len(pushMessages)) + if len(pushMessages) == 0 { + return nil + } type insertData struct { Content types.TransitPushContent `db:"content"` } diff --git a/backend/pkg/notification/sending.go b/backend/pkg/notification/sending.go index fe148ca53..699f4c3f2 100644 --- a/backend/pkg/notification/sending.go +++ b/backend/pkg/notification/sending.go @@ -17,6 +17,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/services" "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) @@ -268,7 +269,8 @@ func sendWebhookNotifications() error { } resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) if err != nil { - log.Error(err, "error sending webhook request", 0) + log.Warnf("error sending webhook request: %v", err) + metrics.NotificationsSent.WithLabelValues("webhook", "error").Inc() return } else { metrics.NotificationsSent.WithLabelValues("webhook", resp.Status).Inc() @@ -393,7 +395,8 @@ func sendDiscordNotifications() error { resp, err := client.Post(webhook.Url, "application/json", reqBody) if err != nil { - log.Error(err, "error sending discord webhook request", 0) + log.Warnf("failed sending discord webhook request %v: %v", webhook.ID, err) + 
metrics.NotificationsSent.WithLabelValues("webhook_discord", "error").Inc() } else { metrics.NotificationsSent.WithLabelValues("webhook_discord", resp.Status).Inc() } @@ -413,10 +416,8 @@ func sendDiscordNotifications() error { errResp.Status = resp.Status resp.Body.Close() - if resp.StatusCode == http.StatusTooManyRequests { - log.Warnf("could not push to discord webhook due to rate limit. %v url: %v", errResp.Body, webhook.Url) - } else { - log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) + if resp.StatusCode != http.StatusOK { + log.WarnWithFields(map[string]interface{}{"errResp.Body": utils.FirstN(errResp.Body, 1000), "webhook.Url": webhook.Url}, "error pushing discord webhook") } _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) if err != nil { diff --git a/backend/pkg/notification/types.go b/backend/pkg/notification/types.go index 9028ffa7e..754955cda 100644 --- a/backend/pkg/notification/types.go +++ b/backend/pkg/notification/types.go @@ -51,7 +51,7 @@ func formatSlotLink(format types.NotificationFormat, slot interface{}) string { return "" } -func formatDashboardAndGroupLink(format types.NotificationFormat, n types.Notification) string { +func formatValidatorPrefixedDashboardAndGroupLink(format types.NotificationFormat, n types.Notification) string { dashboardAndGroupInfo := "" if n.GetDashboardId() != nil { switch format { @@ -66,6 +66,21 @@ func formatDashboardAndGroupLink(format types.NotificationFormat, n types.Notifi return dashboardAndGroupInfo } +func formatPureDashboardAndGroupLink(format types.NotificationFormat, n types.Notification) string { + dashboardAndGroupInfo := "" + if n.GetDashboardId() != nil { + switch format { + case types.NotifciationFormatHtml: + dashboardAndGroupInfo = fmt.Sprintf(`Group %[2]v in Dashboard %[3]v`, 
utils.Config.Frontend.SiteDomain, n.GetDashboardGroupName(), n.GetDashboardName(), *n.GetDashboardId()) + case types.NotifciationFormatText: + dashboardAndGroupInfo = fmt.Sprintf(`Group %[1]v in Dashboard %[2]v`, n.GetDashboardGroupName(), n.GetDashboardName()) + case types.NotifciationFormatMarkdown: + dashboardAndGroupInfo = fmt.Sprintf(`Group **%[1]v** in Dashboard [%[2]v](https://%[3]v/dashboard/%[4]v)`, n.GetDashboardGroupName(), n.GetDashboardName(), utils.Config.Frontend.SiteDomain, *n.GetDashboardId()) + } + } + return dashboardAndGroupInfo +} + type ValidatorProposalNotification struct { types.NotificationBaseImpl @@ -83,7 +98,7 @@ func (n *ValidatorProposalNotification) GetEntitiyId() string { func (n *ValidatorProposalNotification) GetInfo(format types.NotificationFormat) string { vali := formatValidatorLink(format, n.ValidatorIndex) slot := formatSlotLink(format, n.Slot) - dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) switch n.Status { case 0: @@ -135,11 +150,45 @@ func (n *ValidatorProposalNotification) GetLegacyTitle() string { return "-" } +type ValidatorUpcomingProposalNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 + Slot uint64 +} + +func (n *ValidatorUpcomingProposalNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +func (n *ValidatorUpcomingProposalNotification) GetInfo(format types.NotificationFormat) string { + vali := formatValidatorLink(format, n.ValidatorIndex) + slot := formatSlotLink(format, n.Slot) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + return fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s%s.`, slot, vali, dashboardAndGroupInfo) +} + +func (n *ValidatorUpcomingProposalNotification) GetLegacyInfo() string { + var generalPart, suffix string + vali := strconv.FormatUint(n.ValidatorIndex, 10) + slot := 
strconv.FormatUint(n.Slot, 10) + generalPart = fmt.Sprintf(`New scheduled block proposal at slot %s for Validator %s.`, slot, vali) + + return generalPart + suffix +} + +func (n *ValidatorUpcomingProposalNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorUpcomingProposalNotification) GetLegacyTitle() string { + return "Upcoming Block Proposal" +} + type ValidatorIsOfflineNotification struct { types.NotificationBaseImpl ValidatorIndex uint64 - IsOffline bool } func (n *ValidatorIsOfflineNotification) GetEntitiyId() string { @@ -149,19 +198,9 @@ func (n *ValidatorIsOfflineNotification) GetEntitiyId() string { // Overwrite specific methods func (n *ValidatorIsOfflineNotification) GetInfo(format types.NotificationFormat) string { vali := formatValidatorLink(format, n.ValidatorIndex) - epoch := "" - if n.IsOffline { - epoch = formatEpochLink(format, n.LatestState) - } else { - epoch = formatEpochLink(format, n.Epoch) - } - dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) - - if n.IsOffline { - return fmt.Sprintf(`Validator %v%v is offline since epoch %s.`, vali, dashboardAndGroupInfo, epoch) - } else { - return fmt.Sprintf(`Validator %v%v is back online since epoch %v.`, vali, dashboardAndGroupInfo, epoch) - } + epoch := formatEpochLink(format, n.LatestState) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + return fmt.Sprintf(`Validator %v%v is offline since epoch %s.`, vali, dashboardAndGroupInfo, epoch) } func (n *ValidatorIsOfflineNotification) GetTitle() string { @@ -169,70 +208,72 @@ func (n *ValidatorIsOfflineNotification) GetTitle() string { } func (n *ValidatorIsOfflineNotification) GetLegacyInfo() string { - if n.IsOffline { - return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.LatestState) - } else { - return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch) - } + return fmt.Sprintf(`Validator %v is offline 
since epoch %d.`, n.ValidatorIndex, n.Epoch) } func (n *ValidatorIsOfflineNotification) GetLegacyTitle() string { - if n.IsOffline { - return "Validator is Offline" - } else { - return "Validator Back Online" - } + return "Validator is Offline" +} + +type ValidatorIsOnlineNotification struct { + types.NotificationBaseImpl + + ValidatorIndex uint64 +} + +func (n *ValidatorIsOnlineNotification) GetEntitiyId() string { + return fmt.Sprintf("%d", n.ValidatorIndex) +} + +// Overwrite specific methods +func (n *ValidatorIsOnlineNotification) GetInfo(format types.NotificationFormat) string { + vali := formatValidatorLink(format, n.ValidatorIndex) + epoch := formatEpochLink(format, n.Epoch) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) + return fmt.Sprintf(`Validator %v%v is back online since epoch %v.`, vali, dashboardAndGroupInfo, epoch) +} + +func (n *ValidatorIsOnlineNotification) GetTitle() string { + return n.GetLegacyTitle() +} + +func (n *ValidatorIsOnlineNotification) GetLegacyInfo() string { + return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch) +} + +func (n *ValidatorIsOnlineNotification) GetLegacyTitle() string { + return "Validator Back Online" +} + +type ValidatorGroupEfficiencyNotification struct { + types.NotificationBaseImpl + + Threshold float64 + Efficiency float64 +} + +func (n *ValidatorGroupEfficiencyNotification) GetEntitiyId() string { + return fmt.Sprintf("%s - %s", n.GetDashboardName(), n.GetDashboardGroupName()) +} + +// Overwrite specific methods +func (n *ValidatorGroupEfficiencyNotification) GetInfo(format types.NotificationFormat) string { + dashboardAndGroupInfo := formatPureDashboardAndGroupLink(format, n) + epoch := formatEpochLink(format, n.Epoch) + return fmt.Sprintf(`%s efficiency of %.2f%% was below the threhold of %.2f%% in epoch %s.`, dashboardAndGroupInfo, n.Efficiency, n.Threshold, epoch) } -// type validatorGroupIsOfflineNotification struct { -// 
types.NotificationBaseImpl - -// IsOffline bool -// } - -// func (n *validatorGroupIsOfflineNotification) GetEntitiyId() string { -// return fmt.Sprintf("%s - %s", n.GetDashboardName(), n.GetDashboardGroupName()) -// } - -// // Overwrite specific methods -// func (n *validatorGroupIsOfflineNotification) GetInfo(format types.NotificationFormat) string { -// epoch := "" -// if n.IsOffline { -// epoch = formatEpochLink(format, n.LatestState) -// } else { -// epoch = formatEpochLink(format, n.Epoch) -// } - -// if n.IsOffline { -// return fmt.Sprintf(`Group %s is offline since epoch %s.`, n.DashboardGroupName, epoch) -// } else { -// return fmt.Sprintf(`Group %s is back online since epoch %v.`, n.DashboardGroupName, epoch) -// } -// } - -// func (n *validatorGroupIsOfflineNotification) GetTitle() string { -// if n.IsOffline { -// return "Group is offline" -// } else { -// return "Group is back online" -// } -// } - -// func (n *validatorGroupIsOfflineNotification) GetLegacyInfo() string { -// if n.IsOffline { -// return fmt.Sprintf(`Group %s is offline since epoch %s.`, n.DashboardGroupName, n.LatestState) -// } else { -// return fmt.Sprintf(`Group %s is back online since epoch %v.`, n.DashboardGroupName, n.Epoch) -// } -// } - -// func (n *validatorGroupIsOfflineNotification) GetLegacyTitle() string { -// if n.IsOffline { -// return "Group is offline" -// } else { -// return "Group is back online" -// } -// } +func (n *ValidatorGroupEfficiencyNotification) GetTitle() string { + return "Low group efficiency" +} + +func (n *ValidatorGroupEfficiencyNotification) GetLegacyInfo() string { + return n.GetInfo(types.NotifciationFormatText) +} + +func (n *ValidatorGroupEfficiencyNotification) GetLegacyTitle() string { + return n.GetTitle() +} type ValidatorAttestationNotification struct { types.NotificationBaseImpl @@ -247,7 +288,7 @@ func (n *ValidatorAttestationNotification) GetEntitiyId() string { } func (n *ValidatorAttestationNotification) GetInfo(format 
types.NotificationFormat) string { - dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) vali := formatValidatorLink(format, n.ValidatorIndex) epoch := formatEpochLink(format, n.Epoch) @@ -321,7 +362,7 @@ func (n *ValidatorGotSlashedNotification) GetEntitiyId() string { } func (n *ValidatorGotSlashedNotification) GetInfo(format types.NotificationFormat) string { - dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) vali := formatValidatorLink(format, n.ValidatorIndex) epoch := formatEpochLink(format, n.Epoch) slasher := formatValidatorLink(format, n.Slasher) @@ -357,7 +398,7 @@ func (n *ValidatorWithdrawalNotification) GetEntitiyId() string { } func (n *ValidatorWithdrawalNotification) GetInfo(format types.NotificationFormat) string { - dashboardAndGroupInfo := formatDashboardAndGroupLink(format, n) + dashboardAndGroupInfo := formatValidatorPrefixedDashboardAndGroupLink(format, n) vali := formatValidatorLink(format, n.ValidatorIndex) amount := utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false) generalPart := fmt.Sprintf(`An automatic withdrawal of %s has been processed for validator %s%s.`, amount, vali, dashboardAndGroupInfo) diff --git a/frontend/.env-example b/frontend/.env-example index 8e5b8de1b..ffb100613 100644 --- a/frontend/.env-example +++ b/frontend/.env-example @@ -1,15 +1,16 @@ -NUXT_PUBLIC_API_CLIENT: "" -NUXT_PUBLIC_LEGACY_API_CLIENT: "" -NUXT_PRIVATE_API_SERVER: "" -NUXT_PRIVATE_LEGACY_API_SERVER: "" -NUXT_PUBLIC_API_KEY: "" -NUXT_PUBLIC_DOMAIN: "" -NUXT_PUBLIC_STRIPE_BASE_URL: "" -NUXT_PUBLIC_LOG_IP: "" -NUXT_PUBLIC_SHOW_IN_DEVELOPMENT: "" -NUXT_PUBLIC_V1_DOMAIN: "" -NUXT_PUBLIC_LOG_FILE: "" -NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT: "" -NUXT_PUBLIC_MAINTENANCE_TS: "1717700652" -NUXT_PUBLIC_DEPLOYMENT_TYPE: "development" 
-PRIVATE_SSR_SECRET: "" +NUXT_IS_API_MOCKED= +NUXT_PRIVATE_API_SERVER= +NUXT_PRIVATE_LEGACY_API_SERVER= +NUXT_PUBLIC_API_CLIENT= +NUXT_PUBLIC_API_KEY= +NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT= +NUXT_PUBLIC_DEPLOYMENT_TYPE=development +NUXT_PUBLIC_DOMAIN= +NUXT_PUBLIC_LEGACY_API_CLIENT= +NUXT_PUBLIC_LOG_FILE= +NUXT_PUBLIC_LOG_IP= +NUXT_PUBLIC_MAINTENANCE_TS=1717700652 +NUXT_PUBLIC_SHOW_IN_DEVELOPMENT= +NUXT_PUBLIC_STRIPE_BASE_URL= +NUXT_PUBLIC_V1_DOMAIN= +PRIVATE_SSR_SECRET= diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index fac845230..8b64c0da4 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -7,11 +7,17 @@ "DashboardChartSummaryChartFilter", "DashboardGroupManagementModal", "DashboardValidatorManagmentModal", + "NotificationMachinesTable", "NotificationsClientsTable", + "NotificationsDashboardDialogEntity", + "NotificationsDashboardTable", + "NotificationsManagementModalWebhook", + "NotificationsManagementNetwork", + "NotificationsManagementSubscriptionDialog", "NotificationsManagmentMachines", "NotificationsNetworkTable", "NotificationsOverview", - "NotificationsRocketPoolTable", + "NotificationsTableEmpty", "a11y", "checkout", "ci", diff --git a/frontend/README.md b/frontend/README.md index 678f9fb89..7f1a5afb3 100644 --- a/frontend/README.md +++ b/frontend/README.md @@ -122,6 +122,17 @@ bun run preview Check out the [deployment documentation](https://nuxt.com/docs/getting-started/deployment) for more information. +## Get mocked api data + +If your `user` was added to the `ADMIN` or `DEV` group by the `api team`, you can get +`mocked data` from the `api` for certain `endpoints` by adding `?is_mocked=true` as a +`query parameter`. 
+ +You can `turn on` mocked data `globally` for all `configured enpoints` +- by setting `NUXT_PUBLIC_IS_API_MOCKED=true` +in your [.env](.env) or +- running `npm run dev:mock:api` (See: [package.json](package.json)) + ## Descision Record We documented our decisions in the [decisions](decisions.md) file. diff --git a/frontend/assets/css/main.scss b/frontend/assets/css/main.scss index cb6d26ba6..77fce977f 100644 --- a/frontend/assets/css/main.scss +++ b/frontend/assets/css/main.scss @@ -3,9 +3,14 @@ @import "~/assets/css/fonts.scss"; @import "~/assets/css/utils.scss"; -html { +html, +h1,h2,h3,h4,h5,h6, +ul, +li + { margin: 0; padding: 0; + font: inherit; } ul { padding-inline-start: 1.5rem; diff --git a/frontend/components/bc/BcAccordion.vue b/frontend/components/bc/BcAccordion.vue new file mode 100644 index 000000000..a6c8dfb4b --- /dev/null +++ b/frontend/components/bc/BcAccordion.vue @@ -0,0 +1,152 @@ + + + + + diff --git a/frontend/components/bc/BcCard.vue b/frontend/components/bc/BcCard.vue new file mode 100644 index 000000000..f2eed9a5a --- /dev/null +++ b/frontend/components/bc/BcCard.vue @@ -0,0 +1,17 @@ + + + + + diff --git a/frontend/components/bc/BcContentFilter.vue b/frontend/components/bc/BcContentFilter.vue index 53e3a67cb..bf3619eea 100644 --- a/frontend/components/bc/BcContentFilter.vue +++ b/frontend/components/bc/BcContentFilter.vue @@ -1,15 +1,14 @@ diff --git a/frontend/components/bc/BcSlider.vue b/frontend/components/bc/BcSlider.vue index b3dcaf9c0..d965a55df 100644 --- a/frontend/components/bc/BcSlider.vue +++ b/frontend/components/bc/BcSlider.vue @@ -15,7 +15,6 @@ defineProps<{ :step class="bc-slider" type="range" - v-bind="$attrs" > diff --git a/frontend/components/bc/BcText.vue b/frontend/components/bc/BcText.vue index 4829e1583..588198ecf 100644 --- a/frontend/components/bc/BcText.vue +++ b/frontend/components/bc/BcText.vue @@ -1,8 +1,9 @@ diff --git a/frontend/components/notifications/DashboardsTable.vue 
b/frontend/components/notifications/NotificationsDashboardsTable.vue similarity index 88% rename from frontend/components/notifications/DashboardsTable.vue rename to frontend/components/notifications/NotificationsDashboardsTable.vue index f221486a7..8c846f496 100644 --- a/frontend/components/notifications/DashboardsTable.vue +++ b/frontend/components/notifications/NotificationsDashboardsTable.vue @@ -5,9 +5,9 @@ import IconValidator from '../icon/IconValidator.vue' import IconAccount from '../icon/IconAccount.vue' import type { Cursor } from '~/types/datatable' import type { DashboardType } from '~/types/dashboard' -import { useUserDashboardStore } from '~/stores/dashboard/useUserDashboardStore' import type { ChainIDs } from '~/types/network' import type { NotificationDashboardsTableRow } from '~/types/api/notifications' +import { NotificationsDashboardDialogEntity } from '#components' defineEmits<{ (e: 'openDialog'): void }>() @@ -29,8 +29,6 @@ const { setSearch, } = useNotificationsDashboardStore(networkId) -const { getDashboardLabel } = useUserDashboardStore() - const { width } = useWindowSize() const colsVisible = computed(() => { return { @@ -40,21 +38,14 @@ const colsVisible = computed(() => { } }) -const openDialog = () => { - // TODO: implement dialog - alert('not implemented yet 😪') -} - const getDashboardType = (isAccount: boolean): DashboardType => isAccount ? 
'account' : 'validator' const { overview } = useNotificationsDashboardOverviewStore() const mapEventtypeToText = (eventType: NotificationDashboardsTableRow['event_types'][number]) => { switch (eventType) { case 'attestation_missed': return $t('notifications.dashboards.event_type.attestation_missed') - case 'group_offline': - return $t('notifications.dashboards.event_type.group_offline') - case 'group_online': - return $t('notifications.dashboards.event_type.group_online') + case 'group_efficiency_below': + return $t('notifications.dashboards.event_type.group_efficiency_below') case 'incoming_tx': return $t('notifications.dashboards.event_type.incoming_tx') case 'max_collateral': @@ -95,6 +86,20 @@ const mapEventtypeToText = (eventType: NotificationDashboardsTableRow['event_typ const textDashboardNotifications = (event_types: NotificationDashboardsTableRow['event_types']) => { return event_types.map(mapEventtypeToText).join(', ') } + +const dialog = useDialog() + +const showDialog = (row: { identifier: string } & NotificationDashboardsTableRow) => { + dialog.open(NotificationsDashboardDialogEntity, { + data: { + dashboard_id: row.dashboard_id, + epoch: row.epoch, + group_id: row.group_id, + group_name: row.group_name, + identifier: row.identifier, + }, + }) +} - + + + @@ -245,10 +248,7 @@ const textDashboardNotifications = (event_types: NotificationDashboardsTableRow[
{{ $t("notifications.dashboards.expansion.label_group") }} @@ -267,7 +267,7 @@ const textDashboardNotifications = (event_types: NotificationDashboardsTableRow[
@@ -373,17 +377,6 @@ $breakpoint-lg: 1024px; @include utils.truncate-text; } -:deep(.bc-table-header) { - .h1 { - display: none; - } - - @media (min-width: $breakpoint-lg) { - .h1 { - display: block; - } - } -} :deep(.right-info) { flex-direction: column; justify-content: center; diff --git a/frontend/components/notifications/NotificationsMachinesTable.vue b/frontend/components/notifications/NotificationsMachinesTable.vue index 7f9ee188e..d58c6dfed 100644 --- a/frontend/components/notifications/NotificationsMachinesTable.vue +++ b/frontend/components/notifications/NotificationsMachinesTable.vue @@ -45,7 +45,7 @@ const machineEvent = (eventType: NotificationMachinesTableRow['event_type']) => - -