From 5968845cd8d997bf5d6072447f8da582e81c3224 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Fri, 5 Jul 2024 08:09:58 +0200 Subject: [PATCH 001/187] use map for pubkey filter --- backend/pkg/notification/db.go | 9 +++---- backend/pkg/notification/notifications.go | 32 ++++++++++++++++------- 2 files changed, 26 insertions(+), 15 deletions(-) diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index eed83b0f1..cf2a217e0 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -1,15 +1,13 @@ package notification import ( - "encoding/hex" - "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" ) -func GetSubsForEventFilter(eventName types.EventName) ([][]byte, map[string][]types.Subscription, error) { +func GetSubsForEventFilter(eventName types.EventName) (map[string]bool, map[string][]types.Subscription, error) { var subs []types.Subscription subQuery := ` SELECT id, user_id, event_filter, last_sent_epoch, created_epoch, event_threshold, ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, internal_state from users_subscriptions where event_name = $1 @@ -21,7 +19,7 @@ func GetSubsForEventFilter(eventName types.EventName) ([][]byte, map[string][]ty return nil, nil, err } - filtersEncode := make([][]byte, 0, len(subs)) + filtersEncode := make(map[string]bool, len(subs)) for _, sub := range subs { if _, ok := subMap[sub.EventFilter]; !ok { subMap[sub.EventFilter] = make([]types.Subscription, 0) @@ -36,8 +34,7 @@ func GetSubsForEventFilter(eventName types.EventName) ([][]byte, map[string][]ty State: sub.State, }) - b, _ := hex.DecodeString(sub.EventFilter) - filtersEncode = append(filtersEncode, b) + filtersEncode[sub.EventFilter] = true } return filtersEncode, subMap, nil } diff --git a/backend/pkg/notification/notifications.go 
b/backend/pkg/notification/notifications.go index e40a83b44..ec0e841a9 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -2959,20 +2959,34 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint6 RPLStakeMax BigFloat `db:"max_rpl_stake"` } + // filter nodes with no minipools (anymore) because they have min/max stake of 0 + // TODO properly remove notification entry from db stakeInfoPerNode := make([]dbResult, 0) batchSize := 5000 - dataLen := len(pubkeys) - for i := 0; i < dataLen; i += batchSize { - var keys [][]byte - start := i - end := i + batchSize - - if dataLen < end { - end = dataLen + keys := make([][]byte, 0, batchSize) + for pubkey := range pubkeys { + b, err := hex.DecodeString(pubkey) + if err != nil { + log.Error(err, fmt.Sprintf("error decoding pubkey %s", pubkey), 0) + continue } + keys = append(keys, b) - keys = pubkeys[start:end] + if len(keys) > batchSize { + var partial []dbResult + err = db.WriterDb.Select(&partial, ` + SELECT address, rpl_stake, min_rpl_stake, max_rpl_stake + FROM rocketpool_nodes + WHERE address = ANY($1) AND min_rpl_stake != 0 AND max_rpl_stake != 0`, pq.ByteaArray(keys)) + if err != nil { + return err + } + stakeInfoPerNode = append(stakeInfoPerNode, partial...) 
+ keys = make([][]byte, 0, batchSize) + } + } + if len(keys) > 0 { var partial []dbResult // filter nodes with no minipools (anymore) because they have min/max stake of 0 From 94b87dc9682c6dae3948018c16a2354ec0294d69 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Fri, 5 Jul 2024 08:25:01 +0200 Subject: [PATCH 002/187] simplify handling of pubkeys in notifications --- backend/pkg/commons/types/frontend.go | 16 ++++++++++++++++ backend/pkg/notification/db.go | 13 +++++++------ backend/pkg/notification/notifications.go | 12 ++++++------ 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index ee589fcbb..f468b5c43 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -266,6 +266,22 @@ type Subscription struct { EventThreshold float64 `db:"event_threshold"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash" swaggertype:"string"` State sql.NullString `db:"internal_state" swaggertype:"string"` + GroupId *uint32 + DashboardId *uint32 +} + +type ValidatorDashboardConfig struct { + DashboardsByUserId map[uint64]map[uint32]*ValidatorDashboard +} + +type ValidatorDashboard struct { + Name string `db:"name"` + Groups map[uint32]*ValidatorDashboardGroup +} + +type ValidatorDashboardGroup struct { + Name string `db:"name"` + Validators [][]byte } type TaggedValidators struct { diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index cf2a217e0..26838c9bb 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -7,7 +7,11 @@ import ( "github.com/lib/pq" ) -func GetSubsForEventFilter(eventName types.EventName) (map[string]bool, map[string][]types.Subscription, error) { +// Retrieves all subscription for a given event filter +// Map key corresponds to the event filter which can be +// a validator pubkey or an eth1 address (for RPL notifications) +// or a 
list of validators for the tax report notifications +func GetSubsForEventFilter(eventName types.EventName) (map[string][]types.Subscription, error) { var subs []types.Subscription subQuery := ` SELECT id, user_id, event_filter, last_sent_epoch, created_epoch, event_threshold, ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, internal_state from users_subscriptions where event_name = $1 @@ -16,10 +20,9 @@ func GetSubsForEventFilter(eventName types.EventName) (map[string]bool, map[stri subMap := make(map[string][]types.Subscription, 0) err := db.FrontendWriterDB.Select(&subs, subQuery, utils.GetNetwork()+":"+string(eventName)) if err != nil { - return nil, nil, err + return nil, err } - filtersEncode := make(map[string]bool, len(subs)) for _, sub := range subs { if _, ok := subMap[sub.EventFilter]; !ok { subMap[sub.EventFilter] = make([]types.Subscription, 0) @@ -33,10 +36,8 @@ func GetSubsForEventFilter(eventName types.EventName) (map[string]bool, map[stri EventThreshold: sub.EventThreshold, State: sub.State, }) - - filtersEncode[sub.EventFilter] = true } - return filtersEncode, subMap, nil + return subMap, nil } func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index ec0e841a9..4a58768dc 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1207,7 +1207,7 @@ func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[type ExecRewardETH float64 } - _, subMap, err := GetSubsForEventFilter(eventName) + subMap, err := GetSubsForEventFilter(eventName) if err != nil { return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) } @@ -1394,7 +1394,7 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { } func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch 
uint64) error { - _, subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) + subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) } @@ -1573,7 +1573,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma return fmt.Errorf("retrieved more than %v online validators notifications: %v, exiting", onlineValidatorsLimit, len(onlineValidators)) } - _, subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName) + subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName) if err != nil { return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorIsOfflineEventName, err) } @@ -2022,7 +2022,7 @@ func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { // collectWithdrawalNotifications collects all notifications validator withdrawals func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) - _, subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) + subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) if err != nil { return fmt.Errorf("error getting subscriptions for missed attestations %w", err) } @@ -2947,7 +2947,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[ui } func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { - pubkeys, subMap, err := GetSubsForEventFilter(eventName) + subMap, err := GetSubsForEventFilter(eventName) if err != nil { return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) } @@ -2964,7 +2964,7 @@ func 
collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint6 stakeInfoPerNode := make([]dbResult, 0) batchSize := 5000 keys := make([][]byte, 0, batchSize) - for pubkey := range pubkeys { + for pubkey := range subMap { b, err := hex.DecodeString(pubkey) if err != nil { log.Error(err, fmt.Sprintf("error decoding pubkey %s", pubkey), 0) From 67b8529f8b4b8580b7472c190eb251dad5cef1a8 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Fri, 5 Jul 2024 08:54:13 +0200 Subject: [PATCH 003/187] improve typing of userid --- backend/pkg/commons/db/bigtable.go | 30 ++--- backend/pkg/commons/types/frontend.go | 18 +-- backend/pkg/notification/db.go | 16 +-- backend/pkg/notification/notifications.go | 130 +++++++++++++++------- 4 files changed, 125 insertions(+), 69 deletions(-) diff --git a/backend/pkg/commons/db/bigtable.go b/backend/pkg/commons/db/bigtable.go index edd9b3c17..7b7cd147e 100644 --- a/backend/pkg/commons/db/bigtable.go +++ b/backend/pkg/commons/db/bigtable.go @@ -194,7 +194,7 @@ func (bigtable *Bigtable) GetClient() *gcp_bigtable.Client { return bigtable.client } -func (bigtable *Bigtable) SaveMachineMetric(process string, userID uint64, machine string, data []byte) error { +func (bigtable *Bigtable) SaveMachineMetric(process string, userID types.UserId, machine string, data []byte) error { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() @@ -234,7 +234,7 @@ func (bigtable *Bigtable) SaveMachineMetric(process string, userID uint64, machi return nil } -func (bigtable Bigtable) getMachineMetricNamesMap(userID uint64, searchDepth int) (map[string]bool, error) { +func (bigtable Bigtable) getMachineMetricNamesMap(userID types.UserId, searchDepth int) (map[string]bool, error) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30)) defer cancel() @@ -265,7 +265,7 @@ func (bigtable Bigtable) getMachineMetricNamesMap(userID uint64, searchDepth 
int return machineNames, nil } -func (bigtable Bigtable) GetMachineMetricsMachineNames(userID uint64) ([]string, error) { +func (bigtable Bigtable) GetMachineMetricsMachineNames(userID types.UserId) ([]string, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -288,7 +288,7 @@ func (bigtable Bigtable) GetMachineMetricsMachineNames(userID uint64) ([]string, return result, nil } -func (bigtable Bigtable) GetMachineMetricsMachineCount(userID uint64) (uint64, error) { +func (bigtable Bigtable) GetMachineMetricsMachineCount(userID types.UserId) (uint64, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -310,7 +310,7 @@ func (bigtable Bigtable) GetMachineMetricsMachineCount(userID uint64) (uint64, e return uint64(card), nil } -func (bigtable Bigtable) GetMachineMetricsNode(userID uint64, limit, offset int) ([]*types.MachineMetricNode, error) { +func (bigtable Bigtable) GetMachineMetricsNode(userID types.UserId, limit, offset int) ([]*types.MachineMetricNode, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -335,7 +335,7 @@ func (bigtable Bigtable) GetMachineMetricsNode(userID uint64, limit, offset int) ) } -func (bigtable Bigtable) GetMachineMetricsValidator(userID uint64, limit, offset int) ([]*types.MachineMetricValidator, error) { +func (bigtable Bigtable) GetMachineMetricsValidator(userID types.UserId, limit, offset int) ([]*types.MachineMetricValidator, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -360,7 +360,7 @@ func (bigtable Bigtable) GetMachineMetricsValidator(userID uint64, limit, offset ) } -func (bigtable Bigtable) GetMachineMetricsSystem(userID uint64, limit, offset int) ([]*types.MachineMetricSystem, error) { +func (bigtable Bigtable) GetMachineMetricsSystem(userID types.UserId, limit, offset int) 
([]*types.MachineMetricSystem, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "userId": userID, @@ -385,7 +385,7 @@ func (bigtable Bigtable) GetMachineMetricsSystem(userID uint64, limit, offset in ) } -func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | types.MachineMetricValidator](bigtable Bigtable, process string, userID uint64, limit, offset int, marshler func(data []byte, machine string) *T) ([]*T, error) { +func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | types.MachineMetricValidator](bigtable Bigtable, process string, userID types.UserId, limit, offset int, marshler func(data []byte, machine string) *T) ([]*T, error) { ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*30)) defer cancel() @@ -429,7 +429,7 @@ func getMachineMetrics[T types.MachineMetricSystem | types.MachineMetricNode | t return res, nil } -func (bigtable Bigtable) GetMachineRowKey(userID uint64, process string, machine string) string { +func (bigtable Bigtable) GetMachineRowKey(userID types.UserId, process string, machine string) string { return fmt.Sprintf("u:%s:p:%s:m:%s", bigtable.reversePaddedUserID(userID), process, machine) } @@ -437,7 +437,7 @@ func (bigtable Bigtable) GetMachineRowKey(userID uint64, process string, machine // machineData contains the latest machine data in CurrentData // and 5 minute old data in fiveMinuteOldData (defined in limit) // as well as the insert timestamps of both -func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable.RowList) (map[uint64]map[string]*types.MachineMetricSystemUser, error) { +func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable.RowList) (map[types.UserId]map[string]*types.MachineMetricSystemUser, error) { tmr := time.AfterFunc(REPORT_TIMEOUT, func() { log.WarnWithFields(log.Fields{ "rowKeys": rowKeys, @@ -449,7 +449,7 @@ func (bigtable Bigtable) 
GetMachineMetricsForNotifications(rowKeys gcp_bigtable. ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(time.Second*200)) defer cancel() - res := make(map[uint64]map[string]*types.MachineMetricSystemUser) // userID -> machine -> data + res := make(map[types.UserId]map[string]*types.MachineMetricSystemUser) // userID -> machine -> data limit := 5 @@ -509,7 +509,7 @@ func (bigtable Bigtable) GetMachineMetricsForNotifications(rowKeys gcp_bigtable. } //nolint:unparam -func machineMetricRowParts(r string) (bool, uint64, string, string) { +func machineMetricRowParts(r string) (bool, types.UserId, string, string) { keySplit := strings.Split(r, ":") userID, err := strconv.ParseUint(keySplit[1], 10, 64) @@ -526,7 +526,7 @@ func machineMetricRowParts(r string) (bool, uint64, string, string) { process := keySplit[3] - return true, userID, machine, process + return true, types.UserId(userID), machine, process } func (bigtable *Bigtable) SaveValidatorBalances(epoch uint64, validators []*types.Validator) error { @@ -2678,8 +2678,8 @@ func GetCurrentDayClIncome(validator_indices []uint64) (map[uint64]int64, error) return dayIncome, nil } -func (bigtable *Bigtable) reversePaddedUserID(userID uint64) string { - return fmt.Sprintf("%09d", ^uint64(0)-userID) +func (bigtable *Bigtable) reversePaddedUserID(userID types.UserId) string { + return fmt.Sprintf("%09d", ^uint64(0)-uint64(userID)) } func (bigtable *Bigtable) reversedPaddedEpoch(epoch uint64) string { diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index f468b5c43..d91d60747 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -12,6 +12,7 @@ import ( "firebase.google.com/go/messaging" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/consapi/types" "github.com/lib/pq" "github.com/pkg/errors" "golang.org/x/text/cases" @@ -164,7 +165,7 @@ 
type EventNameDesc struct { } type MachineMetricSystemUser struct { - UserID uint64 + UserID UserId Machine string CurrentData *MachineMetricSystem CurrentDataInsertTs int64 @@ -255,7 +256,7 @@ type Notification interface { type Subscription struct { ID *uint64 `db:"id,omitempty"` - UserID *uint64 `db:"user_id,omitempty"` + UserID *UserId `db:"user_id,omitempty"` EventName string `db:"event_name"` EventFilter string `db:"event_filter"` LastSent *time.Time `db:"last_sent_ts"` @@ -266,22 +267,25 @@ type Subscription struct { EventThreshold float64 `db:"event_threshold"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash" swaggertype:"string"` State sql.NullString `db:"internal_state" swaggertype:"string"` - GroupId *uint32 - DashboardId *uint32 + GroupId *int64 + DashboardId *int64 } +type UserId uint64 +type DashboardId uint64 +type DashboardGroupId uint64 type ValidatorDashboardConfig struct { - DashboardsByUserId map[uint64]map[uint32]*ValidatorDashboard + DashboardsByUserId map[UserId]map[DashboardId]*ValidatorDashboard } type ValidatorDashboard struct { Name string `db:"name"` - Groups map[uint32]*ValidatorDashboardGroup + Groups map[DashboardGroupId]*ValidatorDashboardGroup } type ValidatorDashboardGroup struct { Name string `db:"name"` - Validators [][]byte + Validators []types.ValidatorIndex } type TaggedValidators struct { diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 26838c9bb..23f890ba6 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -40,14 +40,14 @@ func GetSubsForEventFilter(eventName types.EventName) (map[string][]types.Subscr return subMap, nil } -func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { - pushByID := map[uint64][]string{} +func GetUserPushTokenByIds(ids []types.UserId) (map[types.UserId][]string, error) { + pushByID := map[types.UserId][]string{} if len(ids) == 0 { return pushByID, nil } var rows []struct { - ID uint64 `db:"user_id"` - Token 
string `db:"notification_token"` + ID types.UserId `db:"user_id"` + Token string `db:"notification_token"` } err := db.FrontendWriterDB.Select(&rows, "SELECT DISTINCT ON (user_id, notification_token) user_id, notification_token FROM users_devices WHERE (user_id = ANY($1) AND user_id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)) AND notify_enabled = true AND active = true AND notification_token IS NOT NULL AND LENGTH(notification_token) > 20 ORDER BY user_id, notification_token, id DESC", pq.Array(ids), types.PushNotificationChannel) @@ -67,14 +67,14 @@ func GetUserPushTokenByIds(ids []uint64) (map[uint64][]string, error) { } // GetUserEmailsByIds returns the emails of users. -func GetUserEmailsByIds(ids []uint64) (map[uint64]string, error) { - mailsByID := map[uint64]string{} +func GetUserEmailsByIds(ids []types.UserId) (map[types.UserId]string, error) { + mailsByID := map[types.UserId]string{} if len(ids) == 0 { return mailsByID, nil } var rows []struct { - ID uint64 `db:"id"` - Email string `db:"email"` + ID types.UserId `db:"id"` + Email string `db:"email"` } // err := db.FrontendWriterDB.Select(&rows, "SELECT id, email FROM users WHERE id = ANY($1) AND id NOT IN (SELECT user_id from users_notification_channels WHERE active = false and channel = $2)", pq.Array(ids), types.EmailNotificationChannel) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 4a58768dc..c408c1cdf 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -225,8 +225,8 @@ func notificationSender() { } } -func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[uint64]map[types.EventName][]types.Notification{} +func collectNotifications(epoch uint64) (map[types.UserId]map[types.EventName][]types.Notification, error) { + notificationsByUserID := 
map[types.UserId]map[types.EventName][]types.Notification{} start := time.Now() var err error var dbIsCoherent bool @@ -253,6 +253,58 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. log.Infof("started collecting notifications") + type dashboardDefinitionRow struct { + DashboardId types.DashboardId `db:"dashboard_id"` + DashboardName string `db:"dashboard_name"` + UserId types.UserId `db:"user_id"` + GroupId types.DashboardGroupId `db:"group_id"` + GroupName string `db:"group_name"` + ValidatorIndex types.ValidatorIndex `db:"validator_index"` + } + + log.Infof("retrieving dashboard definitions") + // TODO: add a filter to retrieve only groups that have notifications enabled + // Needs a new field in the db + var dashboardDefinitions []dashboardDefinitionRow + err = db.AlloyWriter.Select(&dashboardDefinitions, ` + select + users_val_dashboards.id as dashboard_id, + users_val_dashboards.name as dashboard_name, + users_val_dashboards.user_id, + users_val_dashboards_groups.id as group_id, + users_val_dashboards_groups.name as group_name, + users_val_dashboards_validators.validator_index + from users_val_dashboards + left join users_val_dashboards_groups on users_val_dashboards_groups.dashboard_id = users_val_dashboards.id + left join users_val_dashboards_validators on users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id; + `) + if err != nil { + return nil, fmt.Errorf("error getting dashboard definitions: %v", err) + } + + // Now initialize the validator dashboard configuration map + validatorDashboardConfig := &types.ValidatorDashboardConfig{ + DashboardsByUserId: make(map[types.UserId]map[types.DashboardId]*types.ValidatorDashboard), + } + for _, row := range dashboardDefinitions { + if validatorDashboardConfig.DashboardsByUserId[row.UserId] == nil { + validatorDashboardConfig.DashboardsByUserId[row.UserId] = 
make(map[types.DashboardId]*types.ValidatorDashboard) + } + if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] == nil { + validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] = &types.ValidatorDashboard{ + Name: row.DashboardName, + Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), + } + } + if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] == nil { + validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ + Name: row.GroupName, + Validators: []uint64{}, + } + } + validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) + } + err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() @@ -356,8 +408,8 @@ func collectNotifications(epoch uint64) (map[uint64]map[types.EventName][]types. 
return notificationsByUserID, nil } -func collectUserDbNotifications(epoch uint64) (map[uint64]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[uint64]map[types.EventName][]types.Notification{} +func collectUserDbNotifications(epoch uint64) (map[types.UserId]map[types.EventName][]types.Notification, error) { + notificationsByUserID := map[types.UserId]map[types.EventName][]types.Notification{} var err error // Monitoring (premium): machine offline @@ -405,7 +457,7 @@ func collectUserDbNotifications(epoch uint64) (map[uint64]map[types.EventName][] return notificationsByUserID, nil } -func queueNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) { +func queueNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) { subByEpoch := map[uint64][]uint64{} // prevent multiple events being sent with the same subscription id @@ -541,8 +593,8 @@ func getNetwork() string { return "" } -func queuePushNotification(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - userIDs := []uint64{} +func queuePushNotification(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { + userIDs := []types.UserId{} for userID := range notificationsByUserID { userIDs = append(userIDs, userID) } @@ -646,8 +698,8 @@ func sendPushNotifications(useDB *sqlx.DB) error { return nil } -func queueEmailNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { - userIDs := []uint64{} +func queueEmailNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { + userIDs := []types.UserId{} for userID := range notificationsByUserID { userIDs = append(userIDs, userID) } @@ -841,7 +893,7 @@ func sendEmailNotifications(useDb *sqlx.DB) error { return nil } -func 
queueWebhookNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { +func queueWebhookNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { for userID, userNotifications := range notificationsByUserID { var webhooks []types.UserWebhook err := useDB.Select(&webhooks, ` @@ -1198,7 +1250,7 @@ func getUrlPart(validatorIndex uint64) string { return fmt.Sprintf(` For more information visit: https://%s/validator/%v.`, utils.Config.Frontend.SiteDomain, validatorIndex, utils.Config.Frontend.SiteDomain, validatorIndex) } -func collectBlockProposalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, status uint64, eventName types.EventName, epoch uint64) error { +func collectBlockProposalNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, status uint64, eventName types.EventName, epoch uint64) error { type dbResult struct { Proposer uint64 `db:"proposer"` Status uint64 `db:"status"` @@ -1393,7 +1445,7 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { return generalPart } -func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) @@ -1898,7 +1950,7 @@ func (n *validatorGotSlashedNotification) GetInfoMarkdown() string { return generalPart } -func collectValidatorGotSlashedNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func 
collectValidatorGotSlashedNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { dbResult, err := db.GetValidatorsGotSlashed(epoch) if err != nil { return fmt.Errorf("error getting slashed validators from database, err: %w", err) @@ -1919,7 +1971,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID map[uint64]ma var subscribers []struct { Ref uint64 `db:"ref"` Id uint64 `db:"id"` - UserId uint64 `db:"user_id"` + UserId types.UserId `db:"user_id"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` } @@ -2020,7 +2072,7 @@ func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { } // collectWithdrawalNotifications collects all notifications validator withdrawals -func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectWithdrawalNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) if err != nil { @@ -2075,7 +2127,7 @@ func collectWithdrawalNotifications(notificationsByUserID map[uint64]map[types.E type ethClientNotification struct { SubscriptionID uint64 - UserID uint64 + UserID types.UserId Epoch uint64 EthClient string EventFilter string @@ -2183,12 +2235,12 @@ func (n *ethClientNotification) GetInfoMarkdown() string { return generalPart } -func collectEthClientNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectEthClientNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates for _, 
client := range updatedClients { var dbResult []struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` Epoch uint64 `db:"created_epoch"` EventFilter string `db:"event_filter"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` @@ -2234,13 +2286,13 @@ func collectEthClientNotifications(notificationsByUserID map[uint64]map[types.Ev type MachineEvents struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` MachineName string `db:"machine"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` EventThreshold float64 `db:"event_threshold"` } -func collectMonitoringMachineOffline(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineOffline(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { nowTs := time.Now().Unix() return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineOfflineEventName, 120, // notify condition @@ -2259,7 +2311,7 @@ func isMachineDataRecent(machineData *types.MachineMetricSystemUser) bool { return machineData.CurrentDataInsertTs >= nowTs-60*60 } -func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineDiskAlmostFullEventName, 750, // notify condition func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { @@ -2274,7 +2326,7 @@ func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[uint64]map ) } -func collectMonitoringMachineCPULoad(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func 
collectMonitoringMachineCPULoad(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineCpuLoadEventName, 10, // notify condition func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { @@ -2296,7 +2348,7 @@ func collectMonitoringMachineCPULoad(notificationsByUserID map[uint64]map[types. ) } -func collectMonitoringMachineMemoryUsage(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineMemoryUsage(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineMemoryUsageEventName, 10, // notify condition func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { @@ -2317,7 +2369,7 @@ func collectMonitoringMachineMemoryUsage(notificationsByUserID map[uint64]map[ty var isFirstNotificationCheck = true func collectMonitoringMachine( - notificationsByUserID map[uint64]map[types.EventName][]types.Notification, + notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName, epochWaitInBetween int, notifyConditionFulfilled func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool, @@ -2439,7 +2491,7 @@ func collectMonitoringMachine( type monitorMachineNotification struct { SubscriptionID uint64 MachineName string - UserID uint64 + UserID types.UserId Epoch uint64 EventName types.EventName UnsubscribeHash sql.NullString @@ -2518,7 +2570,7 @@ func (n *monitorMachineNotification) GetInfoMarkdown() string { type taxReportNotification struct { SubscriptionID uint64 - UserID uint64 + UserID types.UserId Epoch uint64 EventFilter string UnsubscribeHash sql.NullString @@ -2598,7 +2650,7 @@ func (n *taxReportNotification) GetInfoMarkdown() string 
{ return n.GetInfo(false) } -func collectTaxReportNotificationNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectTaxReportNotificationNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { lastStatsDay, err := cache.LatestExportedStatisticDay.GetOrDefault(db.GetLastExportedStatisticDay) if err != nil { @@ -2613,7 +2665,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID map[uint64] var dbResult []struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` Epoch uint64 `db:"created_epoch"` EventFilter string `db:"event_filter"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` @@ -2658,7 +2710,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID map[uint64] type networkNotification struct { SubscriptionID uint64 - UserID uint64 + UserID types.UserId Epoch uint64 EventFilter string UnsubscribeHash sql.NullString @@ -2709,7 +2761,7 @@ func (n *networkNotification) GetInfoMarkdown() string { return generalPart } -func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectNetworkNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { count := 0 err := db.WriterDb.Get(&count, ` SELECT count(ts) FROM network_liveness WHERE (headepoch-finalizedepoch) > 3 AND ts > now() - interval '60 minutes'; @@ -2722,7 +2774,7 @@ func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.Even if count > 0 { var dbResult []struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` Epoch uint64 `db:"created_epoch"` EventFilter string `db:"event_filter"` UnsubscribeHash sql.NullString 
`db:"unsubscribe_hash"` @@ -2763,7 +2815,7 @@ func collectNetworkNotifications(notificationsByUserID map[uint64]map[types.Even type rocketpoolNotification struct { SubscriptionID uint64 - UserID uint64 + UserID types.UserId Epoch uint64 EventFilter string EventName types.EventName @@ -2839,7 +2891,7 @@ func (n *rocketpoolNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectRocketpoolComissionNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectRocketpoolComissionNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { fee := 0.0 err := db.WriterDb.Get(&fee, ` select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; @@ -2852,7 +2904,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID map[uint64]ma if fee > 0 { var dbResult []struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` Epoch uint64 `db:"created_epoch"` EventFilter string `db:"event_filter"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` @@ -2893,7 +2945,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID map[uint64]ma return nil } -func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { var ts int64 err := db.WriterDb.Get(&ts, ` select date_part('epoch', claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; @@ -2906,7 +2958,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[ui if ts+3*60*60 > time.Now().Unix() { var dbResult []struct { SubscriptionID uint64 `db:"id"` - 
UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` Epoch uint64 `db:"created_epoch"` EventFilter string `db:"event_filter"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` @@ -2946,7 +2998,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[ui return nil } -func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { +func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { subMap, err := GetSubsForEventFilter(eventName) if err != nil { return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) @@ -3124,7 +3176,7 @@ func bigFloat(x float64) *big.Float { return new(big.Float).SetFloat64(x) } -func collectSyncCommittee(notificationsByUserID map[uint64]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { +func collectSyncCommittee(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee nextPeriod := currentPeriod + 1 @@ -3152,7 +3204,7 @@ func collectSyncCommittee(notificationsByUserID map[uint64]map[types.EventName][ var dbResult []struct { SubscriptionID uint64 `db:"id"` - UserID uint64 `db:"user_id"` + UserID types.UserId `db:"user_id"` EventFilter string `db:"event_filter"` UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` } From 3aa7d8e8b56800b56551b52fd361169c34b3b39c Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Fri, 5 Jul 2024 09:37:07 +0200 Subject: [PATCH 004/187] ensure that there is never more than one notification per user / type / filter 
combination --- backend/pkg/commons/types/frontend.go | 2 + backend/pkg/notification/notifications.go | 208 +++++++++------------- 2 files changed, 84 insertions(+), 126 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index d91d60747..5367951a1 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -20,6 +20,8 @@ import ( ) type EventName string +type EventFilter string +type NotificationsPerUserId map[UserId]map[EventName]map[EventFilter]Notification const ( ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index c408c1cdf..e2db123f1 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -225,8 +225,8 @@ func notificationSender() { } } -func collectNotifications(epoch uint64) (map[types.UserId]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[types.UserId]map[types.EventName][]types.Notification{} +func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} start := time.Now() var err error var dbIsCoherent bool @@ -408,8 +408,8 @@ func collectNotifications(epoch uint64) (map[types.UserId]map[types.EventName][] return notificationsByUserID, nil } -func collectUserDbNotifications(epoch uint64) (map[types.UserId]map[types.EventName][]types.Notification, error) { - notificationsByUserID := map[types.UserId]map[types.EventName][]types.Notification{} +func collectUserDbNotifications(epoch uint64) (types.NotificationsPerUserId, error) { + notificationsByUserID := types.NotificationsPerUserId{} var err error // Monitoring (premium): machine offline @@ -457,29 +457,9 @@ func collectUserDbNotifications(epoch uint64) (map[types.UserId]map[types.EventN return notificationsByUserID, nil } -func 
queueNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) { +func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) { subByEpoch := map[uint64][]uint64{} - // prevent multiple events being sent with the same subscription id - for user, notifications := range notificationsByUserID { - for eventType, events := range notifications { - filteredEvents := make([]types.Notification, 0) - - for _, ev := range events { - isDuplicate := false - for _, fe := range filteredEvents { - if fe.GetSubscriptionID() == ev.GetSubscriptionID() { - isDuplicate = true - } - } - if !isDuplicate { - filteredEvents = append(filteredEvents, ev) - } - } - notificationsByUserID[user][eventType] = filteredEvents - } - } - err := queueEmailNotifications(notificationsByUserID, useDB) if err != nil { log.Error(err, "error queuing email notifications", 0) @@ -593,7 +573,7 @@ func getNetwork() string { return "" } -func queuePushNotification(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { +func queuePushNotification(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { userIDs := []types.UserId{} for userID := range notificationsByUserID { userIDs = append(userIDs, userID) @@ -611,7 +591,7 @@ func queuePushNotification(notificationsByUserID map[types.UserId]map[types.Even continue } - go func(userTokens []string, userNotifications map[types.EventName][]types.Notification) { + go func(userTokens []string, userNotifications map[types.EventName]map[types.EventFilter]types.Notification) { var batch []*messaging.Message for event, ns := range userNotifications { for _, n := range ns { @@ -698,7 +678,7 @@ func sendPushNotifications(useDB *sqlx.DB) error { return nil } -func queueEmailNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { +func 
queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { userIDs := []types.UserId{} for userID := range notificationsByUserID { userIDs = append(userIDs, userID) @@ -717,7 +697,7 @@ func queueEmailNotifications(notificationsByUserID map[types.UserId]map[types.Ev // metrics.Errors.WithLabelValues("notifications_mail_not_found").Inc() continue } - go func(userEmail string, userNotifications map[types.EventName][]types.Notification) { + go func(userEmail string, userNotifications map[types.EventName]map[types.EventFilter]types.Notification) { attachments := []types.EmailAttachment{} var msg types.Email @@ -741,7 +721,8 @@ func queueEmailNotifications(notificationsByUserID map[types.UserId]map[types.Ev //nolint:gosec // this is a static string msg.Body += template.HTML(fmt.Sprintf("%s
====

", types.EventLabel[event_title])) unsubURL := "https://" + utils.Config.Frontend.SiteDomain + "/notifications/unsubscribe" - for i, n := range ns { + i := 0 + for _, n := range ns { // Find all unique notification titles for the subject title := n.GetTitle() if _, ok := notificationTitlesMap[title]; !ok { @@ -822,6 +803,7 @@ func queueEmailNotifications(notificationsByUserID map[types.UserId]map[types.Ev } metrics.NotificationsQueued.WithLabelValues("email", string(event)).Inc() + i++ } eventInfo := getEventInfo(event, ns) @@ -893,7 +875,7 @@ func sendEmailNotifications(useDb *sqlx.DB) error { return nil } -func queueWebhookNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, useDB *sqlx.DB) error { +func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { for userID, userNotifications := range notificationsByUserID { var webhooks []types.UserWebhook err := useDB.Select(&webhooks, ` @@ -1250,7 +1232,7 @@ func getUrlPart(validatorIndex uint64) string { return fmt.Sprintf(` For more information visit: https://%s/validator/%v.`, utils.Config.Frontend.SiteDomain, validatorIndex, utils.Config.Frontend.SiteDomain, validatorIndex) } -func collectBlockProposalNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, status uint64, eventName types.EventName, epoch uint64) error { +func collectBlockProposalNotifications(notificationsByUserID types.NotificationsPerUserId, status uint64, eventName types.EventName, epoch uint64) error { type dbResult struct { Proposer uint64 `db:"proposer"` Status uint64 `db:"status"` @@ -1342,12 +1324,12 @@ func collectBlockProposalNotifications(notificationsByUserID map[types.UserId]ma Slot: event.Slot, } if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[*sub.UserID] = 
map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1445,7 +1427,7 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { return generalPart } -func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) @@ -1530,21 +1512,12 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma EventFilter: hex.EncodeToString(event.EventFilter), } if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - } + 
notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - if isDuplicate { - continue - } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1639,7 +1612,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma } log.Infof("new event: validator %v detected as offline since epoch %v", validator.Index, epoch) - n := validatorIsOfflineNotification{ + n := &validatorIsOfflineNotification{ SubscriptionID: *sub.ID, ValidatorIndex: validator.Index, IsOffline: true, @@ -1650,23 +1623,13 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma } if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - break - } - } - if isDuplicate { - log.Infof("duplicate offline notification detected") - continue + notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], &n) + + notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ 
-1697,7 +1660,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) - n := validatorIsOfflineNotification{ + n := &validatorIsOfflineNotification{ SubscriptionID: *sub.ID, ValidatorIndex: validator.Index, IsOffline: false, @@ -1709,23 +1672,12 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ma } if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} - } - isDuplicate := false - for _, userEvent := range notificationsByUserID[*sub.UserID][n.GetEventName()] { - if userEvent.GetSubscriptionID() == n.SubscriptionID { - isDuplicate = true - break - } - } - if isDuplicate { - log.Infof("duplicate online notification detected") - continue + notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], &n) + notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1950,7 +1902,7 @@ func (n *validatorGotSlashedNotification) GetInfoMarkdown() string { return generalPart } -func collectValidatorGotSlashedNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectValidatorGotSlashedNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { dbResult, err := db.GetValidatorsGotSlashed(epoch) if err != 
nil { return fmt.Errorf("error getting slashed validators from database, err: %w", err) @@ -2000,12 +1952,12 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID map[types.Use } if _, exists := notificationsByUserID[sub.UserId]; !exists { - notificationsByUserID[sub.UserId] = map[types.EventName][]types.Notification{} + notificationsByUserID[sub.UserId] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[sub.UserId][n.GetEventName()]; !exists { - notificationsByUserID[sub.UserId][n.GetEventName()] = []types.Notification{} + notificationsByUserID[sub.UserId][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[sub.UserId][n.GetEventName()] = append(notificationsByUserID[sub.UserId][n.GetEventName()], n) + notificationsByUserID[sub.UserId][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -2072,7 +2024,7 @@ func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { } // collectWithdrawalNotifications collects all notifications validator withdrawals -func collectWithdrawalNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) if err != nil { @@ -2111,12 +2063,12 @@ func collectWithdrawalNotifications(notificationsByUserID map[types.UserId]map[t UnsubscribeHash: sub.UnsubscribeHash, } if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[*sub.UserID] = 
map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2235,7 +2187,7 @@ func (n *ethClientNotification) GetInfoMarkdown() string { return generalPart } -func collectEthClientNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates for _, client := range updatedClients { var dbResult []struct { @@ -2272,12 +2224,12 @@ func collectEthClientNotifications(notificationsByUserID map[types.UserId]map[ty UnsubscribeHash: r.UnsubscribeHash, } if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n 
metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2292,7 +2244,7 @@ type MachineEvents struct { EventThreshold float64 `db:"event_threshold"` } -func collectMonitoringMachineOffline(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineOffline(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { nowTs := time.Now().Unix() return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineOfflineEventName, 120, // notify condition @@ -2311,7 +2263,7 @@ func isMachineDataRecent(machineData *types.MachineMetricSystemUser) bool { return machineData.CurrentDataInsertTs >= nowTs-60*60 } -func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineDiskAlmostFull(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineDiskAlmostFullEventName, 750, // notify condition func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { @@ -2326,7 +2278,7 @@ func collectMonitoringMachineDiskAlmostFull(notificationsByUserID map[types.User ) } -func collectMonitoringMachineCPULoad(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineCPULoad(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineCpuLoadEventName, 10, // notify condition func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { @@ -2348,7 +2300,7 @@ func collectMonitoringMachineCPULoad(notificationsByUserID map[types.UserId]map[ ) } -func collectMonitoringMachineMemoryUsage(notificationsByUserID 
map[types.UserId]map[types.EventName][]types.Notification, epoch uint64) error { +func collectMonitoringMachineMemoryUsage(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineMemoryUsageEventName, 10, // notify condition func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { @@ -2369,7 +2321,7 @@ func collectMonitoringMachineMemoryUsage(notificationsByUserID map[types.UserId] var isFirstNotificationCheck = true func collectMonitoringMachine( - notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, + notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epochWaitInBetween int, notifyConditionFulfilled func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool, @@ -2471,12 +2423,12 @@ func collectMonitoringMachine( } //logrus.Infof("notify %v %v", eventName, n) if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.GetEventFilter())] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -2650,7 +2602,7 @@ func (n *taxReportNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectTaxReportNotificationNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, 
eventName types.EventName) error { +func collectTaxReportNotificationNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { lastStatsDay, err := cache.LatestExportedStatisticDay.GetOrDefault(db.GetLastExportedStatisticDay) if err != nil { @@ -2696,12 +2648,12 @@ func collectTaxReportNotificationNotifications(notificationsByUserID map[types.U UnsubscribeHash: r.UnsubscribeHash, } if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.GetEventFilter())] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -2761,7 +2713,7 @@ func (n *networkNotification) GetInfoMarkdown() string { return generalPart } -func collectNetworkNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { count := 0 err := db.WriterDb.Get(&count, ` SELECT count(ts) FROM network_liveness WHERE (headepoch-finalizedepoch) > 3 AND ts > now() - interval '60 minutes'; @@ -2800,12 +2752,12 @@ func collectNetworkNotifications(notificationsByUserID map[types.UserId]map[type UnsubscribeHash: r.UnsubscribeHash, } if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = 
map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2861,7 +2813,9 @@ func (n *rocketpoolNotification) GetInfo(includeUrl bool) string { case types.RocketpoolCollateralMinReached: return fmt.Sprintf(`Your RPL collateral has reached your configured threshold at %v%%.`, n.ExtraData) case types.SyncCommitteeSoon: - return getSyncCommitteeSoonInfo([]types.Notification{n}) + return getSyncCommitteeSoonInfo(map[types.EventFilter]types.Notification{ + types.EventFilter(n.EventFilter): n, + }) } return "" @@ -2891,7 +2845,7 @@ func (n *rocketpoolNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectRocketpoolComissionNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { fee := 0.0 err := db.WriterDb.Get(&fee, ` select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; @@ -2932,12 +2886,12 @@ func collectRocketpoolComissionNotifications(notificationsByUserID map[types.Use UnsubscribeHash: r.UnsubscribeHash, } if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = 
map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2945,7 +2899,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID map[types.Use return nil } -func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName) error { +func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { var ts int64 err := db.WriterDb.Get(&ts, ` select date_part('epoch', claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; @@ -2985,12 +2939,12 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[ty UnsubscribeHash: r.UnsubscribeHash, } if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + 
notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2998,7 +2952,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID map[ty return nil } -func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { +func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { subMap, err := GetSubsForEventFilter(eventName) if err != nil { return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) @@ -3123,12 +3077,12 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID map[types UnsubscribeHash: sub.UnsubscribeHash, } if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[*sub.UserID][n.GetEventName()] = append(notificationsByUserID[*sub.UserID][n.GetEventName()], n) + notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -3176,7 +3130,7 @@ func bigFloat(x float64) *big.Float { return new(big.Float).SetFloat64(x) } -func collectSyncCommittee(notificationsByUserID map[types.UserId]map[types.EventName][]types.Notification, eventName types.EventName, epoch uint64) error { +func 
collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee nextPeriod := currentPeriod + 1 @@ -3232,12 +3186,12 @@ func collectSyncCommittee(notificationsByUserID map[types.UserId]map[types.Event UnsubscribeHash: r.UnsubscribeHash, } if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName][]types.Notification{} + notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} } if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = []types.Notification{} + notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} } - notificationsByUserID[r.UserID][n.GetEventName()] = append(notificationsByUserID[r.UserID][n.GetEventName()], n) + notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -3254,7 +3208,7 @@ type WebhookQueue struct { LastTry time.Time `db:"last_try"` } -func getEventInfo(event types.EventName, ns []types.Notification) string { +func getEventInfo(event types.EventName, ns map[types.EventFilter]types.Notification) string { switch event { case types.SyncCommitteeSoon: return getSyncCommitteeSoonInfo(ns) @@ -3265,12 +3219,13 @@ func getEventInfo(event types.EventName, ns []types.Notification) string { return "" } -func getSyncCommitteeSoonInfo(ns []types.Notification) string { +func getSyncCommitteeSoonInfo(ns map[types.EventFilter]types.Notification) string { validators := []string{} var startEpoch, endEpoch string var inTime time.Duration - for i, n := range ns { + i := 0 + for _, n := range ns { n, ok := n.(*rocketpoolNotification) 
if !ok { log.Error(nil, "Sync committee notification not of type rocketpoolNotification", 0) @@ -3296,6 +3251,7 @@ func getSyncCommitteeSoonInfo(ns []types.Notification) string { } inTime = inTime.Round(time.Second) } + i++ } if len(validators) > 0 { From 9661fee2a15be88d2834bb0e15d61d80dde0a5b0 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Fri, 5 Jul 2024 09:54:42 +0200 Subject: [PATCH 005/187] simplify collecting notifications --- backend/pkg/commons/types/frontend.go | 25 ++++ backend/pkg/notification/notifications.go | 168 +++++++++------------- 2 files changed, 93 insertions(+), 100 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 5367951a1..467f6f5c7 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -4,6 +4,7 @@ import ( "database/sql" "database/sql/driver" "encoding/json" + "fmt" "html/template" "math/big" "strings" @@ -12,6 +13,7 @@ import ( "firebase.google.com/go/messaging" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/consapi/types" "github.com/lib/pq" "github.com/pkg/errors" @@ -21,8 +23,30 @@ import ( type EventName string type EventFilter string + type NotificationsPerUserId map[UserId]map[EventName]map[EventFilter]Notification +func (npui NotificationsPerUserId) AddNotification(n Notification) { + + if n.GetUserId() == 0 { + log.Fatal(fmt.Errorf("Notification user id is 0"), fmt.Sprintf("Notification: %v", n), 0) + } + if n.GetEventName() == "" { + log.Fatal(fmt.Errorf("Notification event name is empty"), fmt.Sprintf("Notification: %v", n), 0) + } + if n.GetEventFilter() == "" { + log.Fatal(fmt.Errorf("Notification event filter is empty"), fmt.Sprintf("Notification: %v", n), 0) + } + + if _, ok := npui[n.GetUserId()]; !ok { + npui[n.GetUserId()] = 
make(map[EventName]map[EventFilter]Notification) + } + if _, ok := npui[n.GetUserId()][n.GetEventName()]; !ok { + npui[n.GetUserId()][EventName(n.GetEventFilter())] = make(map[EventFilter]Notification) + } + npui[n.GetUserId()][n.GetEventName()][EventFilter(n.GetEventFilter())] = n +} + const ( ValidatorBalanceDecreasedEventName EventName = "validator_balance_decreased" ValidatorMissedProposalEventName EventName = "validator_proposal_missed" @@ -252,6 +276,7 @@ type Notification interface { GetEmailAttachment() *EmailAttachment GetUnsubscribeHash() string GetInfoMarkdown() string + GetUserId() UserId } // func UnMarschal diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index e2db123f1..f39309de9 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1315,6 +1315,7 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications log.Infof("creating %v notification for validator %v in epoch %v", eventName, event.Proposer, epoch) n := &validatorProposalNotification{ SubscriptionID: *sub.ID, + UserID: *sub.UserID, ValidatorIndex: event.Proposer, Epoch: epoch, Status: event.Status, @@ -1323,13 +1324,7 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications EventFilter: hex.EncodeToString(pubkey), Slot: event.Slot, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1339,6 +1334,7 @@ func 
collectBlockProposalNotifications(notificationsByUserID types.Notifications type validatorProposalNotification struct { SubscriptionID uint64 + UserID types.UserId ValidatorIndex uint64 ValidatorPublicKey string Epoch uint64 @@ -1350,6 +1346,10 @@ type validatorProposalNotification struct { UnsubscribeHash sql.NullString } +func (n *validatorProposalNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *validatorProposalNotification) GetLatestState() string { return "" } @@ -1505,19 +1505,14 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorMissedAttestationEventName, event.ValidatorIndex, event.Epoch) n := &validatorAttestationNotification{ SubscriptionID: *sub.ID, + UserID: *sub.UserID, ValidatorIndex: event.ValidatorIndex, Epoch: event.Epoch, Status: event.Status, EventName: types.ValidatorMissedAttestationEventName, EventFilter: hex.EncodeToString(event.EventFilter), } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1622,14 +1617,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty EventFilter: hex.EncodeToString(validator.Pubkey), } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; 
!exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - - notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1662,6 +1650,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty n := &validatorIsOfflineNotification{ SubscriptionID: *sub.ID, + UserID: *sub.UserID, ValidatorIndex: validator.Index, IsOffline: false, EventEpoch: epoch, @@ -1671,13 +1660,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty EpochsOffline: epochsSinceOffline, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -1687,6 +1670,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty type validatorIsOfflineNotification struct { SubscriptionID uint64 + UserID types.UserId ValidatorIndex uint64 EventEpoch uint64 EpochsOffline uint64 @@ -1697,6 +1681,10 @@ type validatorIsOfflineNotification struct { InternalState string } +func (n *validatorIsOfflineNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *validatorIsOfflineNotification) GetLatestState() string { return n.InternalState } @@ -1762,6 +1750,7 @@ func (n *validatorIsOfflineNotification) GetInfoMarkdown() string { type validatorAttestationNotification struct { SubscriptionID uint64 + 
UserID types.UserId ValidatorIndex uint64 ValidatorPublicKey string Epoch uint64 @@ -1771,6 +1760,10 @@ type validatorAttestationNotification struct { UnsubscribeHash sql.NullString } +func (n *validatorAttestationNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *validatorAttestationNotification) GetLatestState() string { return "" } @@ -1846,6 +1839,7 @@ func (n *validatorAttestationNotification) GetInfoMarkdown() string { type validatorGotSlashedNotification struct { SubscriptionID uint64 + UserID types.UserId ValidatorIndex uint64 Epoch uint64 Slasher uint64 @@ -1854,6 +1848,10 @@ type validatorGotSlashedNotification struct { UnsubscribeHash sql.NullString } +func (n *validatorGotSlashedNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *validatorGotSlashedNotification) GetLatestState() string { return "" } @@ -1943,6 +1941,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific n := &validatorGotSlashedNotification{ SubscriptionID: sub.Id, + UserID: sub.UserId, Slasher: event.SlasherIndex, Epoch: event.Epoch, Reason: event.Reason, @@ -1950,14 +1949,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), UnsubscribeHash: sub.UnsubscribeHash, } - - if _, exists := notificationsByUserID[sub.UserId]; !exists { - notificationsByUserID[sub.UserId] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[sub.UserId][n.GetEventName()]; !exists { - notificationsByUserID[sub.UserId][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[sub.UserId][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -1965,6 +1957,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID 
types.Notific } type validatorWithdrawalNotification struct { + UserID types.UserId SubscriptionID uint64 ValidatorIndex uint64 Epoch uint64 @@ -1975,6 +1968,10 @@ type validatorWithdrawalNotification struct { UnsubscribeHash sql.NullString } +func (n *validatorWithdrawalNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *validatorWithdrawalNotification) GetLatestState() string { return "" } @@ -2054,6 +2051,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) n := &validatorWithdrawalNotification{ SubscriptionID: *sub.ID, + UserID: *sub.UserID, ValidatorIndex: event.ValidatorIndex, Epoch: epoch, Slot: event.Slot, @@ -2062,13 +2060,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer EventFilter: hex.EncodeToString(event.Pubkey), UnsubscribeHash: sub.UnsubscribeHash, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2086,6 +2078,10 @@ type ethClientNotification struct { UnsubscribeHash sql.NullString } +func (n *ethClientNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *ethClientNotification) GetLatestState() string { return "" } @@ -2223,13 +2219,7 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU EthClient: client.Name, UnsubscribeHash: r.UnsubscribeHash, } - 
if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2422,13 +2412,7 @@ func collectMonitoringMachine( UnsubscribeHash: r.UnsubscribeHash, } //logrus.Infof("notify %v %v", eventName, n) - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.GetEventFilter())] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -2449,6 +2433,10 @@ type monitorMachineNotification struct { UnsubscribeHash sql.NullString } +func (n *monitorMachineNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *monitorMachineNotification) GetLatestState() string { return "" } @@ -2528,6 +2516,10 @@ type taxReportNotification struct { UnsubscribeHash sql.NullString } +func (n *taxReportNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *taxReportNotification) GetLatestState() string { return "" } @@ -2647,13 +2639,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif EventFilter: r.EventFilter, UnsubscribeHash: r.UnsubscribeHash, } - if _, exists := notificationsByUserID[r.UserID]; !exists { - 
notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.GetEventFilter())] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -2668,6 +2654,10 @@ type networkNotification struct { UnsubscribeHash sql.NullString } +func (n *networkNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *networkNotification) GetLatestState() string { return "" } @@ -2751,13 +2741,8 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse EventFilter: r.EventFilter, UnsubscribeHash: r.UnsubscribeHash, } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2775,6 +2760,10 @@ type rocketpoolNotification struct { UnsubscribeHash sql.NullString } +func (n *rocketpoolNotification) GetUserId() types.UserId { + return n.UserID +} + func (n *rocketpoolNotification) GetLatestState() string { return "" } @@ -2885,13 +2874,8 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", UnsubscribeHash: r.UnsubscribeHash, } - if _, exists := notificationsByUserID[r.UserID]; !exists { - 
notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -2938,13 +2922,8 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. EventName: eventName, UnsubscribeHash: r.UnsubscribeHash, } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } @@ -3076,13 +3055,8 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.Not ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), UnsubscribeHash: sub.UnsubscribeHash, } - if _, exists := notificationsByUserID[*sub.UserID]; !exists { - notificationsByUserID[*sub.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[*sub.UserID][n.GetEventName()]; !exists { - notificationsByUserID[*sub.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[*sub.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + + notificationsByUserID.AddNotification(n) 
metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } @@ -3185,13 +3159,7 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ev ExtraData: fmt.Sprintf("%v|%v|%v", mapping[r.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), UnsubscribeHash: r.UnsubscribeHash, } - if _, exists := notificationsByUserID[r.UserID]; !exists { - notificationsByUserID[r.UserID] = map[types.EventName]map[types.EventFilter]types.Notification{} - } - if _, exists := notificationsByUserID[r.UserID][n.GetEventName()]; !exists { - notificationsByUserID[r.UserID][n.GetEventName()] = map[types.EventFilter]types.Notification{} - } - notificationsByUserID[r.UserID][n.GetEventName()][types.EventFilter(n.EventFilter)] = n + notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } From ec0f3c9bedfa00fe196405a087d607f92094f222 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Sat, 6 Jul 2024 10:23:29 +0200 Subject: [PATCH 006/187] refractor notification structs --- backend/pkg/commons/types/frontend.go | 61 ++++ backend/pkg/notification/notifications.go | 383 ++++++---------------- 2 files changed, 157 insertions(+), 287 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 467f6f5c7..a51e5e4c2 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -279,6 +279,67 @@ type Notification interface { GetUserId() UserId } +type NotificationBaseImpl struct { + LatestState string + SubscriptionID uint64 + EventName EventName + Epoch uint64 + Info string + Title string + EventFilter string + EmailAttachment *EmailAttachment + UnsubscribeHash sql.NullString + InfoMarkdown string + UserID UserId +} + +func (n NotificationBaseImpl) GetLatestState() 
string { + return n.LatestState +} + +func (n NotificationBaseImpl) GetSubscriptionID() uint64 { + return n.SubscriptionID +} + +func (n NotificationBaseImpl) GetEventName() EventName { + return n.EventName +} + +func (n NotificationBaseImpl) GetEpoch() uint64 { + return n.Epoch +} + +func (n NotificationBaseImpl) GetInfo(includeUrl bool) string { + return n.Info +} + +func (n NotificationBaseImpl) GetTitle() string { + return n.Title +} + +func (n NotificationBaseImpl) GetEventFilter() string { + return n.EventFilter +} + +func (n NotificationBaseImpl) GetEmailAttachment() *EmailAttachment { + return n.EmailAttachment +} + +func (n NotificationBaseImpl) GetUnsubscribeHash() string { + if n.UnsubscribeHash.Valid { + return n.UnsubscribeHash.String + } + return "" +} + +func (n NotificationBaseImpl) GetInfoMarkdown() string { + return n.InfoMarkdown +} + +func (n NotificationBaseImpl) GetUserId() UserId { + return n.UserID +} + // func UnMarschal type Subscription struct { diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index f39309de9..b177a8d74 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1504,13 +1504,15 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorMissedAttestationEventName, event.ValidatorIndex, event.Epoch) n := &validatorAttestationNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: event.Epoch, + EventName: types.ValidatorMissedAttestationEventName, + EventFilter: hex.EncodeToString(event.EventFilter), + }, ValidatorIndex: event.ValidatorIndex, - Epoch: event.Epoch, Status: event.Status, - EventName: types.ValidatorMissedAttestationEventName, - EventFilter: hex.EncodeToString(event.EventFilter), } 
notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() @@ -1608,13 +1610,15 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty log.Infof("new event: validator %v detected as offline since epoch %v", validator.Index, epoch) n := &validatorIsOfflineNotification{ - SubscriptionID: *sub.ID, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + Epoch: epoch, + EventName: types.ValidatorIsOfflineEventName, + LatestState: fmt.Sprint(epoch - 2), // first epoch the validator stopped attesting + EventFilter: hex.EncodeToString(validator.Pubkey), + }, ValidatorIndex: validator.Index, IsOffline: true, - EventEpoch: epoch, - EventName: types.ValidatorIsOfflineEventName, - InternalState: fmt.Sprint(epoch - 2), // first epoch the validator stopped attesting - EventFilter: hex.EncodeToString(validator.Pubkey), } notificationsByUserID.AddNotification(n) @@ -1632,7 +1636,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty originalLastSeenEpoch, err := strconv.ParseUint(sub.State.String, 10, 64) if err != nil { - // i have no idea what just happened. + // I have no idea what just happened. return fmt.Errorf("this should never happen. 
couldn't parse state as uint64: %v", err) } @@ -1649,14 +1653,16 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty log.Infof("new event: validator %v detected as online again at epoch %v", validator.Index, epoch) n := &validatorIsOfflineNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventName: types.ValidatorIsOfflineEventName, + EventFilter: hex.EncodeToString(validator.Pubkey), + LatestState: "-", + }, ValidatorIndex: validator.Index, IsOffline: false, - EventEpoch: epoch, - EventName: types.ValidatorIsOfflineEventName, - InternalState: "-", - EventFilter: hex.EncodeToString(validator.Pubkey), EpochsOffline: epochsSinceOffline, } @@ -1669,50 +1675,26 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty } type validatorIsOfflineNotification struct { - SubscriptionID uint64 - UserID types.UserId - ValidatorIndex uint64 - EventEpoch uint64 - EpochsOffline uint64 - IsOffline bool - EventName types.EventName - EventFilter string - UnsubscribeHash sql.NullString - InternalState string -} - -func (n *validatorIsOfflineNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *validatorIsOfflineNotification) GetLatestState() string { - return n.InternalState -} + types.NotificationBaseImpl -func (n *validatorIsOfflineNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorIsOfflineNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorIsOfflineNotification) GetEpoch() uint64 { - return n.EventEpoch + ValidatorIndex uint64 + EpochsOffline uint64 + IsOffline bool } +// Overwrite specific methods func (n *validatorIsOfflineNotification) GetInfo(includeUrl bool) string { if n.IsOffline { if includeUrl { - return fmt.Sprintf(`Validator %[1]v is offline since epoch %[2]s).`, 
n.ValidatorIndex, n.InternalState, utils.Config.Frontend.SiteDomain) + return fmt.Sprintf(`Validator %[1]v is offline since epoch %[2]s).`, n.ValidatorIndex, n.LatestState, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.InternalState) + return fmt.Sprintf(`Validator %v is offline since epoch %s.`, n.ValidatorIndex, n.LatestState) } } else { if includeUrl { - return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) + return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) } else { - return fmt.Sprintf(`Validator %v is back online since epoch %v (was offline for %v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, n.EpochsOffline) + return fmt.Sprintf(`Validator %v is back online since epoch %v (was offline for %v epoch(s)).`, n.ValidatorIndex, n.Epoch, n.EpochsOffline) } } } @@ -1725,59 +1707,24 @@ func (n *validatorIsOfflineNotification) GetTitle() string { } } -func (n *validatorIsOfflineNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorIsOfflineNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorIsOfflineNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - func (n *validatorIsOfflineNotification) GetInfoMarkdown() string { if n.IsOffline { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is offline since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain) + return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is offline since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, 
n.Epoch, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch [%[2]v](https://%[3]v/epoch/%[2]v) (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.EventEpoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) + return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch [%[2]v](https://%[3]v/epoch/%[2]v) (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) } } +func (n *validatorIsOfflineNotification) GetEventName() types.EventName { + return types.ValidatorIsOfflineEventName +} + type validatorAttestationNotification struct { - SubscriptionID uint64 - UserID types.UserId + types.NotificationBaseImpl + ValidatorIndex uint64 ValidatorPublicKey string - Epoch uint64 Status uint64 // * Can be 0 = scheduled | missed, 1 executed - EventName types.EventName - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorAttestationNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *validatorAttestationNotification) GetLatestState() string { - return "" -} - -func (n *validatorAttestationNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorAttestationNotification) GetEventName() types.EventName { - return n.EventName -} - -func (n *validatorAttestationNotification) GetEpoch() uint64 { - return n.Epoch } func (n *validatorAttestationNotification) GetInfo(includeUrl bool) string { @@ -1811,21 +1758,6 @@ func (n *validatorAttestationNotification) GetTitle() string { return "-" } -func (n *validatorAttestationNotification) GetEventFilter() string { - return n.EventFilter -} - -func (n *validatorAttestationNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorAttestationNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return 
n.UnsubscribeHash.String - } - return "" -} - func (n *validatorAttestationNotification) GetInfoMarkdown() string { var generalPart = "" switch n.Status { @@ -1837,46 +1769,16 @@ func (n *validatorAttestationNotification) GetInfoMarkdown() string { return generalPart } -type validatorGotSlashedNotification struct { - SubscriptionID uint64 - UserID types.UserId - ValidatorIndex uint64 - Epoch uint64 - Slasher uint64 - Reason string - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorGotSlashedNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *validatorGotSlashedNotification) GetLatestState() string { - return "" -} - -func (n *validatorGotSlashedNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorGotSlashedNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorGotSlashedNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID +func (n *validatorAttestationNotification) GetEventName() types.EventName { + return types.ValidatorMissedAttestationEventName } -func (n *validatorGotSlashedNotification) GetEpoch() uint64 { - return n.Epoch -} +type validatorGotSlashedNotification struct { + types.NotificationBaseImpl -func (n *validatorGotSlashedNotification) GetEventName() types.EventName { - return types.ValidatorGotSlashedEventName + ValidatorIndex uint64 + Slasher uint64 + Reason string } func (n *validatorGotSlashedNotification) GetInfo(includeUrl bool) string { @@ -1891,8 +1793,8 @@ func (n *validatorGotSlashedNotification) GetTitle() string { return "Validator got Slashed" } -func (n *validatorGotSlashedNotification) GetEventFilter() string { - return n.EventFilter +func (n *validatorGotSlashedNotification) GetEventName() types.EventName { + return types.ValidatorGotSlashedEventName } func (n *validatorGotSlashedNotification) GetInfoMarkdown() string { @@ -1940,14 
+1842,16 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific log.Infof("creating %v notification for validator %v in epoch %v", event.SlashedValidatorPubkey, event.Reason, epoch) n := &validatorGotSlashedNotification{ - SubscriptionID: sub.Id, - UserID: sub.UserId, - Slasher: event.SlasherIndex, - Epoch: event.Epoch, - Reason: event.Reason, - ValidatorIndex: event.SlashedValidatorIndex, - EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), - UnsubscribeHash: sub.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: sub.Id, + UserID: sub.UserId, + Epoch: event.Epoch, + EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), + UnsubscribeHash: sub.UnsubscribeHash, + }, + Slasher: event.SlasherIndex, + Reason: event.Reason, + ValidatorIndex: event.SlashedValidatorIndex, } notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() @@ -1957,42 +1861,13 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific } type validatorWithdrawalNotification struct { - UserID types.UserId - SubscriptionID uint64 - ValidatorIndex uint64 - Epoch uint64 - Slot uint64 - Amount uint64 - Address []byte - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *validatorWithdrawalNotification) GetUserId() types.UserId { - return n.UserID -} + types.NotificationBaseImpl -func (n *validatorWithdrawalNotification) GetLatestState() string { - return "" -} - -func (n *validatorWithdrawalNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorWithdrawalNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorWithdrawalNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorWithdrawalNotification) GetEpoch() uint64 { - return n.Epoch + ValidatorIndex 
uint64 + Epoch uint64 + Slot uint64 + Amount uint64 + Address []byte } func (n *validatorWithdrawalNotification) GetEventName() types.EventName { @@ -2011,10 +1886,6 @@ func (n *validatorWithdrawalNotification) GetTitle() string { return "Withdrawal Processed" } -func (n *validatorWithdrawalNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { generalPart := fmt.Sprintf(`An automatic withdrawal of %[2]v has been processed for validator [%[1]v](https://%[6]v/validator/%[1]v) during slot [%[3]v](https://%[6]v/slot/%[3]v). The funds have been sent to: [%[4]v](https://%[6]v/address/0x%[5]x).`, n.ValidatorIndex, utils.FormatClCurrencyString(n.Amount, utils.Config.Frontend.MainCurrency, 6, true, false, false), n.Slot, utils.FormatHashRaw(n.Address), n.Address, utils.Config.Frontend.SiteDomain) return generalPart @@ -2050,15 +1921,17 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer } // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) n := &validatorWithdrawalNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - ValidatorIndex: event.ValidatorIndex, - Epoch: epoch, - Slot: event.Slot, - Amount: event.Amount, - Address: event.Address, - EventFilter: hex.EncodeToString(event.Pubkey), - UnsubscribeHash: sub.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + EventFilter: hex.EncodeToString(event.Pubkey), + UnsubscribeHash: sub.UnsubscribeHash, + }, + ValidatorIndex: event.ValidatorIndex, + Epoch: epoch, + Slot: event.Slot, + Amount: event.Amount, + Address: event.Address, } notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() @@ -2070,39 +1943,9 @@ func collectWithdrawalNotifications(notificationsByUserID 
types.NotificationsPer } type ethClientNotification struct { - SubscriptionID uint64 - UserID types.UserId - Epoch uint64 - EthClient string - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *ethClientNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *ethClientNotification) GetLatestState() string { - return "" -} - -func (n *ethClientNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *ethClientNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} + types.NotificationBaseImpl -func (n *ethClientNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *ethClientNotification) GetEpoch() uint64 { - return n.Epoch + EthClient string } func (n *ethClientNotification) GetEventName() types.EventName { @@ -2147,10 +1990,6 @@ func (n *ethClientNotification) GetTitle() string { return fmt.Sprintf("New %s update", n.EthClient) } -func (n *ethClientNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *ethClientNotification) GetInfoMarkdown() string { url := "" switch n.EthClient { @@ -2212,12 +2051,14 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU for _, r := range dbResult { n := ðClientNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EthClient: client.Name, - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + Epoch: r.Epoch, + EventFilter: r.EventFilter, + UnsubscribeHash: r.UnsubscribeHash, + }, + EthClient: client.Name, } notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() @@ -2404,12 +2245,14 @@ func collectMonitoringMachine( for _, r := range result { n := &monitorMachineNotification{ - SubscriptionID: 
r.SubscriptionID, - MachineName: r.MachineName, - UserID: r.UserID, - EventName: eventName, - Epoch: epoch, - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + EventName: eventName, + Epoch: epoch, + UnsubscribeHash: r.UnsubscribeHash, + }, + MachineName: r.MachineName, } //logrus.Infof("notify %v %v", eventName, n) notificationsByUserID.AddNotification(n) @@ -2425,43 +2268,9 @@ func collectMonitoringMachine( } type monitorMachineNotification struct { - SubscriptionID uint64 - MachineName string - UserID types.UserId - Epoch uint64 - EventName types.EventName - UnsubscribeHash sql.NullString -} + types.NotificationBaseImpl -func (n *monitorMachineNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *monitorMachineNotification) GetLatestState() string { - return "" -} - -func (n *monitorMachineNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *monitorMachineNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *monitorMachineNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *monitorMachineNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *monitorMachineNotification) GetEventName() types.EventName { - return n.EventName + MachineName string } func (n *monitorMachineNotification) GetInfo(includeUrl bool) string { From 7e235b241195533dd37db02a5ff93b00e6c067bf Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Sat, 6 Jul 2024 10:30:15 +0200 Subject: [PATCH 007/187] add missing structs --- backend/pkg/notification/notifications.go | 258 ++++++---------------- 1 file changed, 65 insertions(+), 193 deletions(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index b177a8d74..3a2dc4ce8 100644 --- 
a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1314,14 +1314,16 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications } log.Infof("creating %v notification for validator %v in epoch %v", eventName, event.Proposer, epoch) n := &validatorProposalNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventName: eventName, + EventFilter: hex.EncodeToString(pubkey), + }, ValidatorIndex: event.Proposer, - Epoch: epoch, Status: event.Status, - EventName: eventName, Reward: event.ExecRewardETH, - EventFilter: hex.EncodeToString(pubkey), Slot: event.Slot, } notificationsByUserID.AddNotification(n) @@ -1333,48 +1335,12 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications } type validatorProposalNotification struct { - SubscriptionID uint64 - UserID types.UserId - ValidatorIndex uint64 - ValidatorPublicKey string - Epoch uint64 - Slot uint64 - Status uint64 // * Can be 0 = scheduled, 1 executed, 2 missed */ - EventName types.EventName - EventFilter string - Reward float64 - UnsubscribeHash sql.NullString -} - -func (n *validatorProposalNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *validatorProposalNotification) GetLatestState() string { - return "" -} - -func (n *validatorProposalNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *validatorProposalNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *validatorProposalNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *validatorProposalNotification) GetEpoch() uint64 { - return n.Epoch -} + types.NotificationBaseImpl -func (n *validatorProposalNotification) GetEventName() types.EventName { - return n.EventName + 
ValidatorIndex uint64 + Slot uint64 + Status uint64 // * Can be 0 = scheduled, 1 executed, 2 missed */ + Reward float64 } func (n *validatorProposalNotification) GetInfo(includeUrl bool) string { @@ -1409,10 +1375,6 @@ func (n *validatorProposalNotification) GetTitle() string { return "-" } -func (n *validatorProposalNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *validatorProposalNotification) GetInfoMarkdown() string { var generalPart = "" switch n.Status { @@ -2318,26 +2280,7 @@ func (n *monitorMachineNotification) GetInfoMarkdown() string { } type taxReportNotification struct { - SubscriptionID uint64 - UserID types.UserId - Epoch uint64 - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *taxReportNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *taxReportNotification) GetLatestState() string { - return "" -} - -func (n *taxReportNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" + types.NotificationBaseImpl } func (n *taxReportNotification) GetEmailAttachment() *types.EmailAttachment { @@ -2374,14 +2317,6 @@ func (n *taxReportNotification) GetEmailAttachment() *types.EmailAttachment { return &types.EmailAttachment{Attachment: pdf, Name: fmt.Sprintf("income_history_%v_%v.pdf", firstDay.Format("20060102"), lastDay.Format("20060102"))} } -func (n *taxReportNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *taxReportNotification) GetEpoch() uint64 { - return n.Epoch -} - func (n *taxReportNotification) GetEventName() types.EventName { return types.TaxReportEventName } @@ -2442,11 +2377,13 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif for _, r := range dbResult { n := &taxReportNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: 
types.NotificationBaseImpl{ + SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + Epoch: r.Epoch, + EventFilter: r.EventFilter, + UnsubscribeHash: r.UnsubscribeHash, + }, } notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() @@ -2456,38 +2393,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif } type networkNotification struct { - SubscriptionID uint64 - UserID types.UserId - Epoch uint64 - EventFilter string - UnsubscribeHash sql.NullString -} - -func (n *networkNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *networkNotification) GetLatestState() string { - return "" -} - -func (n *networkNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *networkNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *networkNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *networkNotification) GetEpoch() uint64 { - return n.Epoch + types.NotificationBaseImpl } func (n *networkNotification) GetEventName() types.EventName { @@ -2503,10 +2409,6 @@ func (n *networkNotification) GetTitle() string { return "Beaconchain Network Issues" } -func (n *networkNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *networkNotification) GetInfoMarkdown() string { generalPart := fmt.Sprintf(`Network experienced finality issues ([view chart](https://%v/charts/network_liveness)).`, utils.Config.Frontend.SiteDomain) return generalPart @@ -2544,11 +2446,13 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse for _, r := range dbResult { n := &networkNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + 
SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + Epoch: r.Epoch, + EventFilter: r.EventFilter, + UnsubscribeHash: r.UnsubscribeHash, + }, } notificationsByUserID.AddNotification(n) @@ -2560,44 +2464,8 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse } type rocketpoolNotification struct { - SubscriptionID uint64 - UserID types.UserId - Epoch uint64 - EventFilter string - EventName types.EventName - ExtraData string - UnsubscribeHash sql.NullString -} - -func (n *rocketpoolNotification) GetUserId() types.UserId { - return n.UserID -} - -func (n *rocketpoolNotification) GetLatestState() string { - return "" -} - -func (n *rocketpoolNotification) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - -func (n *rocketpoolNotification) GetEmailAttachment() *types.EmailAttachment { - return nil -} - -func (n *rocketpoolNotification) GetSubscriptionID() uint64 { - return n.SubscriptionID -} - -func (n *rocketpoolNotification) GetEpoch() uint64 { - return n.Epoch -} - -func (n *rocketpoolNotification) GetEventName() types.EventName { - return n.EventName + types.NotificationBaseImpl + ExtraData string } func (n *rocketpoolNotification) GetInfo(includeUrl bool) string { @@ -2635,10 +2503,6 @@ func (n *rocketpoolNotification) GetTitle() string { return "" } -func (n *rocketpoolNotification) GetEventFilter() string { - return n.EventFilter -} - func (n *rocketpoolNotification) GetInfoMarkdown() string { return n.GetInfo(false) } @@ -2675,13 +2539,15 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific for _, r := range dbResult { n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + 
SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + Epoch: r.Epoch, + EventFilter: r.EventFilter, + EventName: eventName, + UnsubscribeHash: r.UnsubscribeHash, + }, + ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", } notificationsByUserID.AddNotification(n) @@ -2724,12 +2590,14 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. for _, r := range dbResult { n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + Epoch: r.Epoch, + EventFilter: r.EventFilter, + EventName: eventName, + UnsubscribeHash: r.UnsubscribeHash, + }, } notificationsByUserID.AddNotification(n) @@ -2856,13 +2724,15 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.Not } n := &rocketpoolNotification{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: epoch, - EventFilter: sub.EventFilter, - EventName: eventName, - ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), - UnsubscribeHash: sub.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: eventName, + UnsubscribeHash: sub.UnsubscribeHash, + }, + ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), } notificationsByUserID.AddNotification(n) @@ -2960,13 +2830,15 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ev for _, r := range dbResult { n := &rocketpoolNotification{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: epoch, - EventFilter: r.EventFilter, - EventName: eventName, - ExtraData: fmt.Sprintf("%v|%v|%v", mapping[r.EventFilter], 
nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), - UnsubscribeHash: r.UnsubscribeHash, + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: r.SubscriptionID, + UserID: r.UserID, + Epoch: epoch, + EventFilter: r.EventFilter, + EventName: eventName, + UnsubscribeHash: r.UnsubscribeHash, + }, + ExtraData: fmt.Sprintf("%v|%v|%v", mapping[r.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), } notificationsByUserID.AddNotification(n) metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() From bf095cc9ed932e12c53de3e68636723b60e4878f Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 8 Jul 2024 10:21:21 +0200 Subject: [PATCH 008/187] refactor GetSubsForEventFilter --- backend/pkg/notification/db.go | 51 ++- backend/pkg/notification/notifications.go | 382 +++++++++++----------- 2 files changed, 235 insertions(+), 198 deletions(-) diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 23f890ba6..36810d6b3 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -1,6 +1,7 @@ package notification import ( + "github.com/doug-martin/goqu/v9" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" @@ -11,14 +12,54 @@ import ( // Map key corresponds to the event filter which can be // a validator pubkey or an eth1 address (for RPL notifications) // or a list of validators for the tax report notifications -func GetSubsForEventFilter(eventName types.EventName) (map[string][]types.Subscription, error) { +// optionally it is possible to set a filter on the last sent ts and the event filter +// fields +func GetSubsForEventFilter(eventName types.EventName,
lastSentFilter string, lastSentFilterArgs []interface{}, eventFilters []string) (map[string][]types.Subscription, error) { var subs []types.Subscription - subQuery := ` - SELECT id, user_id, event_filter, last_sent_epoch, created_epoch, event_threshold, ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, internal_state from users_subscriptions where event_name = $1 - ` + + // subQuery := ` + // SELECT + // id, + // user_id, + // event_filter, + // last_sent_epoch, + // created_epoch, + // event_threshold, + // ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash, + // internal_state + // from users_subscriptions + // where event_name = $1 + // ` + + ds := goqu.Dialect("postgres").From("users_subscriptions").Select( + goqu.C("id"), + goqu.C("user_id"), + goqu.C("event_filter"), + goqu.C("last_sent_epoch"), + goqu.C("created_epoch"), + goqu.C("event_threshold"), + goqu.L("ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash"), + goqu.C("internal_state"), + ).Where(goqu.C("event_name").Eq(utils.GetNetwork() + ":" + string(eventName))) + + if lastSentFilter != "" { + if len(lastSentFilterArgs) > 0 { + ds = ds.Where(goqu.L(lastSentFilter, lastSentFilterArgs...)) + } else { + ds = ds.Where(goqu.L(lastSentFilter)) + } + } + if len(eventFilters) > 0 { + ds = ds.Where(goqu.L("event_filter = ANY(?)", pq.StringArray(eventFilters))) + } + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return nil, err + } subMap := make(map[string][]types.Subscription, 0) - err := db.FrontendWriterDB.Select(&subs, subQuery, utils.GetNetwork()+":"+string(eventName)) + err = db.FrontendWriterDB.Select(&subs, query, args) if err != nil { return nil, err } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 3a2dc4ce8..0f1a71b3e 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1241,7 +1241,7 @@ func collectBlockProposalNotifications(notificationsByUserID 
types.Notifications ExecRewardETH float64 } - subMap, err := GetSubsForEventFilter(eventName) + subMap, err := GetSubsForEventFilter(eventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for (missed) block proposals %w", err) } @@ -1390,7 +1390,7 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { } func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { - subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName) + subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) } @@ -1557,7 +1557,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty return fmt.Errorf("retrieved more than %v online validators notifications: %v, exiting", onlineValidatorsLimit, len(onlineValidators)) } - subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName) + subMap, err = GetSubsForEventFilter(types.ValidatorIsOfflineEventName, "", nil, nil) if err != nil { return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorIsOfflineEventName, err) } @@ -1772,6 +1772,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific query := "" resultsLen := len(dbResult) for i, event := range dbResult { + // TODO: clarify why we need the id here?! 
query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, ENCODE(unsubscribe_hash, 'hex') AS unsubscribe_hash from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) if i < resultsLen-1 { query += " UNION " @@ -1856,7 +1857,7 @@ func (n *validatorWithdrawalNotification) GetInfoMarkdown() string { // collectWithdrawalNotifications collects all notifications validator withdrawals func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { // get all users that are subscribed to this event (scale: a few thousand rows depending on how many users we have) - subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName) + subMap, err := GetSubsForEventFilter(types.ValidatorReceivedWithdrawalEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for missed attestations %w", err) } @@ -1987,43 +1988,43 @@ func (n *ethClientNotification) GetInfoMarkdown() string { func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates for _, client := range updatedClients { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID types.UserId `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE - us.event_name=$1 - AND - us.event_filter=$2 - AND - ((us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP($3) > us.last_sent_ts) OR us.last_sent_ts IS NULL) - `, - eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this 
client + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE + // us.event_name=$1 + // AND + // us.event_filter=$2 + // AND + // ((us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP($3) > us.last_sent_ts) OR us.last_sent_ts IS NULL) + // `, + // eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this client + + dbResult, err := GetSubsForEventFilter( + eventName, + "(us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) > us.last_sent_ts) OR us.last_sent_ts IS NULL", + []interface{}{client.Date.Unix()}, + []string{strings.ToLower(client.Name)}) if err != nil { return err } - for _, r := range dbResult { - n := ðClientNotification{ - NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - }, - EthClient: client.Name, + for _, subs := range dbResult { + for _, sub := range subs { + n := ðClientNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + UnsubscribeHash: sub.UnsubscribeHash, + }, + EthClient: client.Name, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } } return nil @@ -2041,7 +2042,7 @@ func collectMonitoringMachineOffline(notificationsByUserID types.NotificationsPe nowTs := time.Now().Unix() return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineOfflineEventName, 120, // notify condition - func(_ *MachineEvents, machineData 
*types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if machineData.CurrentDataInsertTs < nowTs-10*60 && machineData.CurrentDataInsertTs > nowTs-90*60 { return true } @@ -2059,7 +2060,7 @@ func isMachineDataRecent(machineData *types.MachineMetricSystemUser) bool { func collectMonitoringMachineDiskAlmostFull(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineDiskAlmostFullEventName, 750, // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if !isMachineDataRecent(machineData) { return false } @@ -2074,7 +2075,7 @@ func collectMonitoringMachineDiskAlmostFull(notificationsByUserID types.Notifica func collectMonitoringMachineCPULoad(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineCpuLoadEventName, 10, // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if !isMachineDataRecent(machineData) { return false } @@ -2096,7 +2097,7 @@ func collectMonitoringMachineCPULoad(notificationsByUserID types.NotificationsPe func collectMonitoringMachineMemoryUsage(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { return collectMonitoringMachine(notificationsByUserID, types.MonitoringMachineMemoryUsageEventName, 10, // notify condition - func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool { + func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool { if !isMachineDataRecent(machineData) { return false } @@ -2117,21 +2118,23 @@ func 
collectMonitoringMachine( notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epochWaitInBetween int, - notifyConditionFulfilled func(subscribeData *MachineEvents, machineData *types.MachineMetricSystemUser) bool, + notifyConditionFulfilled func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool, epoch uint64, ) error { - var allSubscribed []MachineEvents + var allSubscribed []*types.Subscription + // event_filter == machine name + // TODO: clarify why we need grouping here?! err := db.FrontendWriterDB.Select(&allSubscribed, `SELECT us.user_id, max(us.id) AS id, ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, - event_filter AS machine, + event_filter, COALESCE(event_threshold, 0) AS event_threshold FROM users_subscriptions us WHERE us.event_name = $1 AND us.created_epoch <= $2 AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) - group by us.user_id, machine, event_threshold`, + group by us.user_id, event_filter, event_threshold`, eventName, epoch, epochWaitInBetween) if err != nil { return err @@ -2139,7 +2142,7 @@ func collectMonitoringMachine( rowKeys := gcp_bigtable.RowList{} for _, data := range allSubscribed { - rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(data.UserID, "system", data.MachineName)) + rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(*data.UserID, "system", data.EventFilter)) } machineDataOfSubscribed, err := db.BigtableClient.GetMachineMetricsForNotifications(rowKeys) @@ -2147,20 +2150,20 @@ func collectMonitoringMachine( return err } - var result []MachineEvents + var result []*types.Subscription for _, data := range allSubscribed { localData := data // Create a local copy of the data variable - machineMap, found := machineDataOfSubscribed[localData.UserID] + machineMap, found := machineDataOfSubscribed[*localData.UserID] if !found { continue } - currentMachineData, found := machineMap[localData.MachineName] + 
currentMachineData, found := machineMap[localData.EventFilter] if !found { continue } //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) - if notifyConditionFulfilled(&localData, currentMachineData) { + if notifyConditionFulfilled(localData, currentMachineData) { result = append(result, localData) } } @@ -2208,13 +2211,13 @@ func collectMonitoringMachine( for _, r := range result { n := &monitorMachineNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, + SubscriptionID: *r.ID, + UserID: *r.UserID, EventName: eventName, Epoch: epoch, UnsubscribeHash: r.UnsubscribeHash, }, - MachineName: r.MachineName, + MachineName: r.EventFilter, } //logrus.Infof("notify %v %v", eventName, n) notificationsByUserID.AddNotification(n) @@ -2351,42 +2354,37 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif return nil } - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID types.UserId `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - name := string(eventName) - if utils.Config.Chain.ClConfig.ConfigName != "" { - name = utils.Config.Chain.ClConfig.ConfigName + ":" + name - } - - err = db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts < $2 OR (us.last_sent_ts IS NULL AND us.created_ts < $2)); - `, - name, firstDayOfMonth) + // err = db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND 
(us.last_sent_ts < $2 OR (us.last_sent_ts IS NULL AND us.created_ts < $2)); + // `, + // name, firstDayOfMonth) + dbResults, err := GetSubsForEventFilter( + types.TaxReportEventName, + "us.last_sent_ts < ? OR (us.last_sent_ts IS NULL AND us.created_ts < ?)", + []interface{}{firstDayOfMonth, firstDayOfMonth}, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &taxReportNotification{ - NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - }, + for _, subs := range dbResults { + for _, sub := range subs { + n := &taxReportNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + UnsubscribeHash: sub.UnsubscribeHash, + }, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } return nil @@ -2425,38 +2423,38 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse } if count > 0 { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID types.UserId `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL); - `, - utils.GetNetwork()+":"+string(eventName)) - + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, 
us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL); + // `, + // utils.GetNetwork()+":"+string(eventName)) + + dbResult, err := GetSubsForEventFilter( + eventName, + "us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL", + nil, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &networkNotification{ - NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - UnsubscribeHash: r.UnsubscribeHash, - }, - } + for _, subs := range dbResult { + for _, sub := range subs { + n := &networkNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + UnsubscribeHash: sub.UnsubscribeHash, + }, + } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } } } @@ -2518,40 +2516,40 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific } if fee > 0 { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID types.UserId `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= $2 OR (us.event_threshold < 0 AND 
us.event_threshold * -1 >= $2)); - `, - utils.GetNetwork()+":"+string(eventName), fee) - + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= $2 OR (us.event_threshold < 0 AND us.event_threshold * -1 >= $2)); + // `, + // utils.GetNetwork()+":"+string(eventName), fee) + + dbResult, err := GetSubsForEventFilter( + eventName, + "(us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= ? OR (us.event_threshold < 0 AND us.event_threshold * -1 >= ?))", + []interface{}{fee, fee}, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &rocketpoolNotification{ - NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - UnsubscribeHash: r.UnsubscribeHash, - }, - ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", - } + for _, subs := range dbResult { + for _, sub := range subs { + n := &rocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: eventName, + UnsubscribeHash: sub.UnsubscribeHash, + }, + ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", + } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } } } @@ -2569,39 +2567,41 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.
} if ts+3*60*60 > time.Now().Unix() { - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID types.UserId `db:"user_id"` - Epoch uint64 `db:"created_epoch"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err := db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL); - `, - utils.GetNetwork()+":"+string(eventName)) - + // var dbResult []*types.Subscription + + // err := db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL); + // `, + // utils.GetNetwork()+":"+string(eventName)) + + dbResult, err := GetSubsForEventFilter( + eventName, + "us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL", + nil, + nil, + ) if err != nil { return err } - for _, r := range dbResult { - n := &rocketpoolNotification{ - NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: r.Epoch, - EventFilter: r.EventFilter, - EventName: eventName, - UnsubscribeHash: r.UnsubscribeHash, - }, - } + for _, subs := range dbResult { + for _, sub := range subs { + n := &rocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: eventName, + UnsubscribeHash: sub.UnsubscribeHash, + }, + } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + 
notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() + } } } @@ -2609,7 +2609,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. } func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { - subMap, err := GetSubsForEventFilter(eventName) + subMap, err := GetSubsForEventFilter(eventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for RocketpoolRPLCollateral %w", err) } @@ -2809,39 +2809,35 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ev pubKeys = append(pubKeys, val.PubKey) } - var dbResult []struct { - SubscriptionID uint64 `db:"id"` - UserID types.UserId `db:"user_id"` - EventFilter string `db:"event_filter"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - } - - err = db.FrontendWriterDB.Select(&dbResult, ` - SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash - FROM users_subscriptions AS us - WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL) AND event_filter = ANY($2); - `, - utils.GetNetwork()+":"+string(eventName), pq.StringArray(pubKeys), - ) + dbResult, err := GetSubsForEventFilter(eventName, "us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL", nil, pubKeys) + // err = db.FrontendWriterDB.Select(&dbResult, ` + // SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash + // FROM users_subscriptions AS us + // WHERE us.event_name=$1 AND (us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL) AND event_filter = ANY($2); + // `, + // utils.GetNetwork()+":"+string(eventName), pq.StringArray(pubKeys), + // ) if err != nil { return err } - for _, r := range dbResult { - n := &rocketpoolNotification{ - 
NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: r.SubscriptionID, - UserID: r.UserID, - Epoch: epoch, - EventFilter: r.EventFilter, - EventName: eventName, - UnsubscribeHash: r.UnsubscribeHash, - }, - ExtraData: fmt.Sprintf("%v|%v|%v", mapping[r.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), + for _, subs := range dbResult { + for _, sub := range subs { + n := &rocketpoolNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: eventName, + UnsubscribeHash: sub.UnsubscribeHash, + }, + ExtraData: fmt.Sprintf("%v|%v|%v", mapping[sub.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } return nil From f89ce9738c86f21d5b4067c878358308324ef59b Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 8 Jul 2024 10:29:47 +0200 Subject: [PATCH 009/187] fix linter --- backend/pkg/commons/types/frontend.go | 1 - backend/pkg/notification/notifications.go | 13 ++++++++++--- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index a51e5e4c2..f9669b9ec 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -27,7 +27,6 @@ type EventFilter string type NotificationsPerUserId map[UserId]map[EventName]map[EventFilter]Notification func (npui NotificationsPerUserId) AddNotification(n Notification) { - if n.GetUserId() == 
0 { log.Fatal(fmt.Errorf("Notification user id is 0"), fmt.Sprintf("Notification: %v", n), 0) } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 0f1a71b3e..c81d29e6a 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -231,6 +231,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { var err error var dbIsCoherent bool + // do a consistency check to make sure that we have all the data we need in the db err = db.WriterDb.Get(&dbIsCoherent, ` SELECT NOT (array[false] && array_agg(is_coherent)) AS is_coherent @@ -263,6 +264,8 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } log.Infof("retrieving dashboard definitions") + // Retrieve all dashboard definitions to be able to retrieve validators included in + // the group notification subscriptions // TODO: add a filter to retrieve only groups that have notifications enabled // Needs a new field in the db var dashboardDefinitions []dashboardDefinitionRow @@ -305,6 +308,8 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) } + // TODO: pass the validatorDashboardConfig to the notification collection functions + err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() @@ -1389,7 +1394,9 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { return generalPart } +// collectAttestationAndOfflineValidatorNotifications collects notifications for missed attestations and offline validators func 
collectAttestationAndOfflineValidatorNotifications(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { + // Retrieve subscriptions for missed attestations subMap, err := GetSubsForEventFilter(types.ValidatorMissedAttestationEventName, "", nil, nil) if err != nil { return fmt.Errorf("error getting subscriptions for missted attestations %w", err) @@ -1403,12 +1410,13 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty } // get attestations for all validators for the last 4 epochs - + // we need 4 epochs so that can detect the online / offline status of validators validators, err := db.GetValidatorIndices() if err != nil { return err } + // this reads the submitted attestations for the last 4 epochs participationPerEpoch, err := db.GetValidatorAttestationHistoryForNotifications(epoch-3, epoch) if err != nil { return fmt.Errorf("error getting validator attestations from db %w", err) @@ -1988,7 +1996,6 @@ func (n *ethClientNotification) GetInfoMarkdown() string { func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates for _, client := range updatedClients { - // err := db.FrontendWriterDB.Select(&dbResult, ` // SELECT us.id, us.user_id, us.created_epoch, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') AS unsubscribe_hash // FROM users_subscriptions AS us @@ -2362,7 +2369,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif // name, firstDayOfMonth) dbResults, err := GetSubsForEventFilter( - types.TaxReportEventName, + eventName, "us.last_sent_ts < ? 
OR (us.last_sent_ts IS NULL AND us.created_ts < ?)", []interface{}{firstDayOfMonth, firstDayOfMonth}, nil, From 370bff2970a29c62f63903478c89e4dc7e188185 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 8 Jul 2024 12:18:58 +0200 Subject: [PATCH 010/187] fix bug in AddNotification function --- backend/pkg/commons/types/frontend.go | 4 +- backend/pkg/notification/notifications.go | 96 ++++++++++++----------- 2 files changed, 53 insertions(+), 47 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index f9669b9ec..2a4f0d8ca 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -41,7 +41,7 @@ func (npui NotificationsPerUserId) AddNotification(n Notification) { npui[n.GetUserId()] = make(map[EventName]map[EventFilter]Notification) } if _, ok := npui[n.GetUserId()][n.GetEventName()]; !ok { - npui[n.GetUserId()][EventName(n.GetEventFilter())] = make(map[EventFilter]Notification) + npui[n.GetUserId()][n.GetEventName()] = make(map[EventFilter]Notification) } npui[n.GetUserId()][n.GetEventName()][EventFilter(n.GetEventFilter())] = n } @@ -344,7 +344,7 @@ func (n NotificationBaseImpl) GetUserId() UserId { type Subscription struct { ID *uint64 `db:"id,omitempty"` UserID *UserId `db:"user_id,omitempty"` - EventName string `db:"event_name"` + EventName EventName `db:"event_name"` EventFilter string `db:"event_filter"` LastSent *time.Time `db:"last_sent_ts"` LastEpoch *uint64 `db:"last_sent_epoch"` diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index c81d29e6a..e803f8514 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -309,7 +309,9 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } // TODO: pass the validatorDashboardConfig to the notification collection functions - + // The following functions will 
collect the notifications and add them to the + // notificationsByUserID map. The notifications will be queued and sent later + // by the notification sender process err = collectAttestationAndOfflineValidatorNotifications(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_missed_attestation").Inc() @@ -352,7 +354,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } log.Infof("collecting withdrawal notifications took: %v", time.Since(start)) - err = collectNetworkNotifications(notificationsByUserID, types.NetworkLivenessIncreasedEventName) + err = collectNetworkNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_network").Inc() return nil, fmt.Errorf("error collecting network notifications: %v", err) @@ -372,7 +374,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { return nil, fmt.Errorf("error collecting rocketpool notifications: %v", err) } } else { - err = collectRocketpoolComissionNotifications(notificationsByUserID, types.RocketpoolCommissionThresholdEventName) + err = collectRocketpoolComissionNotifications(notificationsByUserID) if err != nil { //nolint:misspell metrics.Errors.WithLabelValues("notifications_collect_rocketpool_comission").Inc() @@ -380,7 +382,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { } log.Infof("collecting rocketpool commissions took: %v", time.Since(start)) - err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID, types.RocketpoolNewClaimRoundStartedEventName) + err = collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_rocketpool_reward_claim").Inc() return nil, fmt.Errorf("error collecting new rocketpool claim round: %v", err) @@ -403,7 +405,7 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) 
{ } } - err = collectSyncCommittee(notificationsByUserID, types.SyncCommitteeSoon, epoch) + err = collectSyncCommittee(notificationsByUserID, epoch) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_sync_committee").Inc() return nil, fmt.Errorf("error collecting sync committee: %v", err) @@ -446,14 +448,14 @@ func collectUserDbNotifications(epoch uint64) (types.NotificationsPerUserId, err } // New ETH clients - err = collectEthClientNotifications(notificationsByUserID, types.EthClientUpdateEventName) + err = collectEthClientNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_eth_client").Inc() return nil, fmt.Errorf("error collecting Eth client notifications: %v", err) } //Tax Report - err = collectTaxReportNotificationNotifications(notificationsByUserID, types.TaxReportEventName) + err = collectTaxReportNotificationNotifications(notificationsByUserID) if err != nil { metrics.Errors.WithLabelValues("notifications_collect_tax_report").Inc() return nil, fmt.Errorf("error collecting tax report notifications: %v", err) @@ -735,6 +737,8 @@ func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, notificationTitles = append(notificationTitles, title) } + // TODO: this is bad and will break in case there are a lot of unsubscribe hashes to generate + // the unsubscribe hash should be set when we add the subscription to the db unsubHash := n.GetUnsubscribeHash() if unsubHash == "" { id := n.GetSubscriptionID() @@ -760,10 +764,8 @@ func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, `, id) if err != nil { log.Error(err, "error getting user subscription by subscription id", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } + utils.Rollback(tx) + continue } raw := fmt.Sprintf("%v%v%v%v", sub.ID, sub.UserID, sub.EventName, sub.CreatedTime) @@ -772,19 +774,15 @@ func 
queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, _, err = tx.Exec("UPDATE users_subscriptions set unsubscribe_hash = $1 WHERE id = $2", digest[:], id) if err != nil { log.Error(err, "error updating users subscriptions table with unsubscribe hash", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } + utils.Rollback(tx) + continue } err = tx.Commit() if err != nil { log.Error(err, "error committing transaction to update users subscriptions with an unsubscribe hash", 0) - err = tx.Rollback() - if err != nil { - log.Error(err, "error rolling back transaction", 0) - } + utils.Rollback(tx) + continue } unsubHash = hex.EncodeToString(digest[:]) @@ -1323,7 +1321,7 @@ func collectBlockProposalNotifications(notificationsByUserID types.Notifications SubscriptionID: *sub.ID, UserID: *sub.UserID, Epoch: epoch, - EventName: eventName, + EventName: sub.EventName, EventFilter: hex.EncodeToString(pubkey), }, ValidatorIndex: event.Proposer, @@ -1478,7 +1476,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty SubscriptionID: *sub.ID, UserID: *sub.UserID, Epoch: event.Epoch, - EventName: types.ValidatorMissedAttestationEventName, + EventName: sub.EventName, EventFilter: hex.EncodeToString(event.EventFilter), }, ValidatorIndex: event.ValidatorIndex, @@ -1583,9 +1581,10 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty NotificationBaseImpl: types.NotificationBaseImpl{ SubscriptionID: *sub.ID, Epoch: epoch, - EventName: types.ValidatorIsOfflineEventName, + EventName: sub.EventName, LatestState: fmt.Sprint(epoch - 2), // first epoch the validator stopped attesting EventFilter: hex.EncodeToString(validator.Pubkey), + UserID: *sub.UserID, }, ValidatorIndex: validator.Index, IsOffline: true, @@ -1627,7 +1626,7 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty SubscriptionID: *sub.ID, UserID: *sub.UserID, Epoch: 
epoch, - EventName: types.ValidatorIsOfflineEventName, + EventName: sub.EventName, EventFilter: hex.EncodeToString(validator.Pubkey), LatestState: "-", }, @@ -1781,7 +1780,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific resultsLen := len(dbResult) for i, event := range dbResult { // TODO: clarify why we need the id here?! - query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, ENCODE(unsubscribe_hash, 'hex') AS unsubscribe_hash from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) + query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, ENCODE(unsubscribe_hash, 'hex') AS unsubscribe_hash, event_name from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) if i < resultsLen-1 { query += " UNION " } @@ -1792,10 +1791,11 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific } var subscribers []struct { - Ref uint64 `db:"ref"` - Id uint64 `db:"id"` - UserId types.UserId `db:"user_id"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` + Ref uint64 `db:"ref"` + Id uint64 `db:"id"` + UserId types.UserId `db:"user_id"` + UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` + EventName types.EventName `db:"event_name"` } name := string(types.ValidatorGotSlashedEventName) @@ -1819,6 +1819,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific Epoch: event.Epoch, EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), UnsubscribeHash: sub.UnsubscribeHash, + EventName: sub.EventName, }, Slasher: event.SlasherIndex, Reason: event.Reason, @@ -1897,6 +1898,7 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer UserID: *sub.UserID, EventFilter: hex.EncodeToString(event.Pubkey), UnsubscribeHash: sub.UnsubscribeHash, + EventName: sub.EventName, }, ValidatorIndex: event.ValidatorIndex, Epoch: epoch, @@ -1993,7 +1995,7 @@ func (n 
*ethClientNotification) GetInfoMarkdown() string { return generalPart } -func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { +func collectEthClientNotifications(notificationsByUserID types.NotificationsPerUserId) error { updatedClients := ethclients.GetUpdatedClients() //only check if there are new updates for _, client := range updatedClients { // err := db.FrontendWriterDB.Select(&dbResult, ` @@ -2009,7 +2011,7 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU // eventName, strings.ToLower(client.Name), client.Date.Unix()) // was last notification sent 2 days ago for this client dbResult, err := GetSubsForEventFilter( - eventName, + types.EthClientUpdateEventName, "(us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) > us.last_sent_ts) OR us.last_sent_ts IS NULL", []interface{}{client.Date.Unix()}, []string{strings.ToLower(client.Name)}) @@ -2026,6 +2028,7 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU Epoch: sub.CreatedEpoch, EventFilter: sub.EventFilter, UnsubscribeHash: sub.UnsubscribeHash, + EventName: sub.EventName, }, EthClient: client.Name, } @@ -2220,9 +2223,10 @@ func collectMonitoringMachine( NotificationBaseImpl: types.NotificationBaseImpl{ SubscriptionID: *r.ID, UserID: *r.UserID, - EventName: eventName, + EventName: r.EventName, Epoch: epoch, UnsubscribeHash: r.UnsubscribeHash, + EventFilter: r.EventFilter, }, MachineName: r.EventFilter, } @@ -2348,7 +2352,7 @@ func (n *taxReportNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectTaxReportNotificationNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { +func collectTaxReportNotificationNotifications(notificationsByUserID types.NotificationsPerUserId) error { lastStatsDay, err := cache.LatestExportedStatisticDay.GetOrDefault(db.GetLastExportedStatisticDay) if err != nil { @@ 
-2369,7 +2373,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif // name, firstDayOfMonth) dbResults, err := GetSubsForEventFilter( - eventName, + types.TaxReportEventName, "us.last_sent_ts < ? OR (us.last_sent_ts IS NULL AND us.created_ts < ?)", []interface{}{firstDayOfMonth, firstDayOfMonth}, nil, @@ -2387,6 +2391,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif Epoch: sub.CreatedEpoch, EventFilter: sub.EventFilter, UnsubscribeHash: sub.UnsubscribeHash, + EventName: sub.EventName, }, } notificationsByUserID.AddNotification(n) @@ -2419,7 +2424,7 @@ func (n *networkNotification) GetInfoMarkdown() string { return generalPart } -func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { +func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUserId) error { count := 0 err := db.WriterDb.Get(&count, ` SELECT count(ts) FROM network_liveness WHERE (headepoch-finalizedepoch) > 3 AND ts > now() - interval '60 minutes'; @@ -2438,7 +2443,7 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse // utils.GetNetwork()+":"+string(eventName)) dbResult, err := GetSubsForEventFilter( - eventName, + types.NetworkLivenessIncreasedEventName, "us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL", nil, nil, @@ -2456,6 +2461,7 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse Epoch: sub.CreatedEpoch, EventFilter: sub.EventFilter, UnsubscribeHash: sub.UnsubscribeHash, + EventName: sub.EventName, }, } @@ -2512,7 +2518,7 @@ func (n *rocketpoolNotification) GetInfoMarkdown() string { return n.GetInfo(false) } -func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { +func collectRocketpoolComissionNotifications(notificationsByUserID types.NotificationsPerUserId) error { fee := 0.0 
err := db.WriterDb.Get(&fee, ` select current_node_fee from rocketpool_network_stats order by id desc LIMIT 1; @@ -2531,7 +2537,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific // utils.GetNetwork()+":"+string(eventName), fee) dbResult, err := GetSubsForEventFilter( - eventName, + types.RocketpoolCommissionThresholdEventName, "(us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= ? OR (us.event_threshold < 0 AND us.event_threshold * -1 >= ?)", []interface{}{fee, fee}, nil, @@ -2548,7 +2554,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific UserID: *sub.UserID, Epoch: sub.CreatedEpoch, EventFilter: sub.EventFilter, - EventName: eventName, + EventName: sub.EventName, UnsubscribeHash: sub.UnsubscribeHash, }, ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", @@ -2563,7 +2569,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific return nil } -func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName) error { +func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types.NotificationsPerUserId) error { var ts int64 err := db.WriterDb.Get(&ts, ` select date_part('epoch', claim_interval_time_start)::int from rocketpool_network_stats order by id desc LIMIT 1; @@ -2584,7 +2590,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. // utils.GetNetwork()+":"+string(eventName)) dbResult, err := GetSubsForEventFilter( - eventName, + types.RocketpoolNewClaimRoundStartedEventName, "us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL", nil, nil, @@ -2601,7 +2607,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. 
UserID: *sub.UserID, Epoch: sub.CreatedEpoch, EventFilter: sub.EventFilter, - EventName: eventName, + EventName: sub.EventName, UnsubscribeHash: sub.UnsubscribeHash, }, } @@ -2736,7 +2742,7 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.Not UserID: *sub.UserID, Epoch: epoch, EventFilter: sub.EventFilter, - EventName: eventName, + EventName: sub.EventName, UnsubscribeHash: sub.UnsubscribeHash, }, ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), @@ -2790,7 +2796,7 @@ func bigFloat(x float64) *big.Float { return new(big.Float).SetFloat64(x) } -func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, eventName types.EventName, epoch uint64) error { +func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, epoch uint64) error { slotsPerSyncCommittee := utils.SlotsPerSyncCommittee() currentPeriod := epoch * utils.Config.Chain.ClConfig.SlotsPerEpoch / slotsPerSyncCommittee nextPeriod := currentPeriod + 1 @@ -2816,7 +2822,7 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ev pubKeys = append(pubKeys, val.PubKey) } - dbResult, err := GetSubsForEventFilter(eventName, "us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL", nil, pubKeys) + dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoon, "us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL", nil, pubKeys) // err = db.FrontendWriterDB.Select(&dbResult, ` // SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash // FROM users_subscriptions AS us @@ -2837,7 +2843,7 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ev UserID: *sub.UserID, Epoch: epoch, EventFilter: sub.EventFilter, - EventName: eventName, + EventName: sub.EventName, UnsubscribeHash: sub.UnsubscribeHash, }, ExtraData: fmt.Sprintf("%v|%v|%v", mapping[sub.EventFilter], 
nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), From 65f0c8f055cef326cb5b468fd20586185134ff08 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 8 Jul 2024 13:48:31 +0200 Subject: [PATCH 011/187] remove direct email unsubscribe --- backend/pkg/commons/types/frontend.go | 23 +-- backend/pkg/notification/db.go | 11 +- backend/pkg/notification/notifications.go | 185 +++++++--------------- 3 files changed, 66 insertions(+), 153 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 2a4f0d8ca..9c8f45c04 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -273,7 +273,6 @@ type Notification interface { GetTitle() string GetEventFilter() string GetEmailAttachment() *EmailAttachment - GetUnsubscribeHash() string GetInfoMarkdown() string GetUserId() UserId } @@ -287,7 +286,6 @@ type NotificationBaseImpl struct { Title string EventFilter string EmailAttachment *EmailAttachment - UnsubscribeHash sql.NullString InfoMarkdown string UserID UserId } @@ -324,13 +322,6 @@ func (n NotificationBaseImpl) GetEmailAttachment() *EmailAttachment { return n.EmailAttachment } -func (n NotificationBaseImpl) GetUnsubscribeHash() string { - if n.UnsubscribeHash.Valid { - return n.UnsubscribeHash.String - } - return "" -} - func (n NotificationBaseImpl) GetInfoMarkdown() string { return n.InfoMarkdown } @@ -349,13 +340,12 @@ type Subscription struct { LastSent *time.Time `db:"last_sent_ts"` LastEpoch *uint64 `db:"last_sent_epoch"` // Channels pq.StringArray `db:"channels"` - CreatedTime time.Time `db:"created_ts"` - CreatedEpoch uint64 `db:"created_epoch"` - EventThreshold float64 `db:"event_threshold"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash" swaggertype:"string"` - State sql.NullString `db:"internal_state" swaggertype:"string"` - GroupId 
*int64 - DashboardId *int64 + CreatedTime time.Time `db:"created_ts"` + CreatedEpoch uint64 `db:"created_epoch"` + EventThreshold float64 `db:"event_threshold"` + State sql.NullString `db:"internal_state" swaggertype:"string"` + GroupId *int64 + DashboardId *int64 } type UserId uint64 @@ -562,7 +552,6 @@ type Email struct { Title string Body template.HTML SubscriptionManageURL template.HTML - UnsubURL template.HTML } type UserWebhook struct { diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 36810d6b3..cbcb9d497 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -38,7 +38,6 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las goqu.C("last_sent_epoch"), goqu.C("created_epoch"), goqu.C("event_threshold"), - goqu.L("ENCODE(unsubscribe_hash, 'hex') as unsubscribe_hash"), goqu.C("internal_state"), ).Where(goqu.C("event_name").Eq(utils.GetNetwork() + ":" + string(eventName))) @@ -68,15 +67,7 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las if _, ok := subMap[sub.EventFilter]; !ok { subMap[sub.EventFilter] = make([]types.Subscription, 0) } - subMap[sub.EventFilter] = append(subMap[sub.EventFilter], types.Subscription{ - UserID: sub.UserID, - ID: sub.ID, - LastEpoch: sub.LastEpoch, - EventFilter: sub.EventFilter, - CreatedEpoch: sub.CreatedEpoch, - EventThreshold: sub.EventThreshold, - State: sub.State, - }) + subMap[sub.EventFilter] = append(subMap[sub.EventFilter], sub) } return subMap, nil } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index e803f8514..2d7b6ff7c 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -3,7 +3,6 @@ package notification import ( "bytes" "context" - "crypto/sha256" "database/sql" "database/sql/driver" "encoding/hex" @@ -11,7 +10,6 @@ import ( "errors" "fmt" - "html" "html/template" "io" "math/big" @@ -598,6 
+596,9 @@ func queuePushNotification(notificationsByUserID types.NotificationsPerUserId, u continue } + // todo: this looks like a flawed approach to queue the notifications + // this will issue one db write per user, which is not optimal + // we should batch the notifications and write them in one go go func(userTokens []string, userNotifications map[types.EventName]map[types.EventFilter]types.Notification) { var batch []*messaging.Message for event, ns := range userNotifications { @@ -727,7 +728,6 @@ func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, } //nolint:gosec // this is a static string msg.Body += template.HTML(fmt.Sprintf("%s
====

", types.EventLabel[event_title])) - unsubURL := "https://" + utils.Config.Frontend.SiteDomain + "/notifications/unsubscribe" i := 0 for _, n := range ns { // Find all unique notification titles for the subject @@ -737,64 +737,6 @@ func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, notificationTitles = append(notificationTitles, title) } - // TODO: this is bad and will break in case there are a lot of unsubscribe hashes to generate - // the unsubscribe hash should be set when we add the subscription to the db - unsubHash := n.GetUnsubscribeHash() - if unsubHash == "" { - id := n.GetSubscriptionID() - - tx, err := db.FrontendWriterDB.Beginx() - if err != nil { - log.Error(err, "error starting transaction", 0) - } - var sub types.Subscription - err = tx.Get(&sub, ` - SELECT - id, - user_id, - event_name, - event_filter, - last_sent_ts, - last_sent_epoch, - created_ts, - created_epoch, - event_threshold - FROM users_subscriptions - WHERE id = $1 - `, id) - if err != nil { - log.Error(err, "error getting user subscription by subscription id", 0) - utils.Rollback(tx) - continue - } - - raw := fmt.Sprintf("%v%v%v%v", sub.ID, sub.UserID, sub.EventName, sub.CreatedTime) - digest := sha256.Sum256([]byte(raw)) - - _, err = tx.Exec("UPDATE users_subscriptions set unsubscribe_hash = $1 WHERE id = $2", digest[:], id) - if err != nil { - log.Error(err, "error updating users subscriptions table with unsubscribe hash", 0) - utils.Rollback(tx) - continue - } - - err = tx.Commit() - if err != nil { - log.Error(err, "error committing transaction to update users subscriptions with an unsubscribe hash", 0) - utils.Rollback(tx) - continue - } - - unsubHash = hex.EncodeToString(digest[:]) - } - if i == 0 { - unsubURL += "?hash=" + html.EscapeString(unsubHash) - } else { - unsubURL += "&hash=" + html.EscapeString(unsubHash) - } - //nolint:gosec // this is a static string - msg.UnsubURL = template.HTML(fmt.Sprintf(`Unsubscribe`, unsubURL)) - if event != 
types.SyncCommitteeSoon { // SyncCommitteeSoon notifications are summed up in getEventInfo for all validators //nolint:gosec // this is a static string @@ -1150,6 +1092,8 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { notifMap[n.Content.Webhook.ID] = append(notifMap[n.Content.Webhook.ID], n) } for _, webhook := range webhookMap { + // todo: this has the potential to spin up thousands of go routines + // should use an errgroup instead if we decide to keep the aproach go func(webhook types.UserWebhook, reqs []types.TransitDiscord) { defer func() { // update retries counters in db based on end result @@ -1780,7 +1724,7 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific resultsLen := len(dbResult) for i, event := range dbResult { // TODO: clarify why we need the id here?! - query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, ENCODE(unsubscribe_hash, 'hex') AS unsubscribe_hash, event_name from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) + query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, event_name from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) if i < resultsLen-1 { query += " UNION " } @@ -1791,11 +1735,10 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific } var subscribers []struct { - Ref uint64 `db:"ref"` - Id uint64 `db:"id"` - UserId types.UserId `db:"user_id"` - UnsubscribeHash sql.NullString `db:"unsubscribe_hash"` - EventName types.EventName `db:"event_name"` + Ref uint64 `db:"ref"` + Id uint64 `db:"id"` + UserId types.UserId `db:"user_id"` + EventName types.EventName `db:"event_name"` } name := string(types.ValidatorGotSlashedEventName) @@ -1814,12 +1757,11 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific n := &validatorGotSlashedNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: sub.Id, - UserID: 
sub.UserId, - Epoch: event.Epoch, - EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), - UnsubscribeHash: sub.UnsubscribeHash, - EventName: sub.EventName, + SubscriptionID: sub.Id, + UserID: sub.UserId, + Epoch: event.Epoch, + EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), + EventName: sub.EventName, }, Slasher: event.SlasherIndex, Reason: event.Reason, @@ -1894,11 +1836,10 @@ func collectWithdrawalNotifications(notificationsByUserID types.NotificationsPer // log.Infof("creating %v notification for validator %v in epoch %v", types.ValidatorReceivedWithdrawalEventName, event.ValidatorIndex, epoch) n := &validatorWithdrawalNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - EventFilter: hex.EncodeToString(event.Pubkey), - UnsubscribeHash: sub.UnsubscribeHash, - EventName: sub.EventName, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + EventFilter: hex.EncodeToString(event.Pubkey), + EventName: sub.EventName, }, ValidatorIndex: event.ValidatorIndex, Epoch: epoch, @@ -2023,12 +1964,11 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU for _, sub := range subs { n := ðClientNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: sub.CreatedEpoch, - EventFilter: sub.EventFilter, - UnsubscribeHash: sub.UnsubscribeHash, - EventName: sub.EventName, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, EthClient: client.Name, } @@ -2221,12 +2161,11 @@ func collectMonitoringMachine( for _, r := range result { n := &monitorMachineNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *r.ID, - UserID: *r.UserID, - EventName: r.EventName, - Epoch: epoch, - UnsubscribeHash: r.UnsubscribeHash, - EventFilter: r.EventFilter, + SubscriptionID: *r.ID, + UserID: *r.UserID, + 
EventName: r.EventName, + Epoch: epoch, + EventFilter: r.EventFilter, }, MachineName: r.EventFilter, } @@ -2386,12 +2325,11 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif for _, sub := range subs { n := &taxReportNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: sub.CreatedEpoch, - EventFilter: sub.EventFilter, - UnsubscribeHash: sub.UnsubscribeHash, - EventName: sub.EventName, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, } notificationsByUserID.AddNotification(n) @@ -2456,12 +2394,11 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse for _, sub := range subs { n := &networkNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: sub.CreatedEpoch, - EventFilter: sub.EventFilter, - UnsubscribeHash: sub.UnsubscribeHash, - EventName: sub.EventName, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, } @@ -2550,12 +2487,11 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific for _, sub := range subs { n := &rocketpoolNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: sub.CreatedEpoch, - EventFilter: sub.EventFilter, - EventName: sub.EventName, - UnsubscribeHash: sub.UnsubscribeHash, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, ExtraData: strconv.FormatInt(int64(fee*100), 10) + "%", } @@ -2603,12 +2539,11 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. 
for _, sub := range subs { n := &rocketpoolNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: sub.CreatedEpoch, - EventFilter: sub.EventFilter, - EventName: sub.EventName, - UnsubscribeHash: sub.UnsubscribeHash, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: sub.CreatedEpoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, } @@ -2738,12 +2673,11 @@ func collectRocketpoolRPLCollateralNotifications(notificationsByUserID types.Not n := &rocketpoolNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: epoch, - EventFilter: sub.EventFilter, - EventName: sub.EventName, - UnsubscribeHash: sub.UnsubscribeHash, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, ExtraData: strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.2f", threshold*100), "0"), "."), } @@ -2839,12 +2773,11 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ep for _, sub := range subs { n := &rocketpoolNotification{ NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: *sub.ID, - UserID: *sub.UserID, - Epoch: epoch, - EventFilter: sub.EventFilter, - EventName: sub.EventName, - UnsubscribeHash: sub.UnsubscribeHash, + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, }, ExtraData: fmt.Sprintf("%v|%v|%v", mapping[sub.EventFilter], nextPeriod*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod, (nextPeriod+1)*utils.Config.Chain.ClConfig.EpochsPerSyncCommitteePeriod), } From c85a5e5ff247b763c609fb46ad2d7e1a6a5f472f Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 9 Jul 2024 12:25:51 +0200 Subject: [PATCH 012/187] add handling of orphaned block notifications --- backend/pkg/notification/notifications.go 
| 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 2d7b6ff7c..5ca1a1108 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1306,6 +1306,8 @@ func (n *validatorProposalNotification) GetInfo(includeUrl bool) string { generalPart = fmt.Sprintf(`Validator %s proposed block at slot %s with %v %v execution reward.`, vali, slot, n.Reward, utils.Config.Frontend.ElCurrency) case 2: generalPart = fmt.Sprintf(`Validator %s missed a block proposal at slot %s.`, vali, slot) + case 3: + generalPart = fmt.Sprintf(`Validator %s had an orphaned block proposal at slot %s.`, vali, slot) } return generalPart + suffix } @@ -1318,6 +1320,8 @@ func (n *validatorProposalNotification) GetTitle() string { return "New Block Proposal" case 2: return "Block Proposal Missed" + case 3: + return "Block Proposal Missed (Orphaned)" } return "-" } @@ -1331,6 +1335,8 @@ func (n *validatorProposalNotification) GetInfoMarkdown() string { generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) proposed a new block at slot [%[3]v](https://%[1]v/slot/%[3]v) with %[4]v %[5]v execution reward.`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot, n.Reward, utils.Config.Frontend.ElCurrency) case 2: generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) missed a block proposal at slot [%[3]v](https://%[1]v/slot/%[3]v).`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot) + case 3: + generalPart = fmt.Sprintf(`Validator [%[2]v](https://%[1]v/validator/%[2]v) had an orphaned block proposal at slot [%[3]v](https://%[1]v/slot/%[3]v).`, utils.Config.Frontend.SiteDomain, n.ValidatorIndex, n.Slot) } return generalPart From 2ca40f7f1633ee5447e5bd81cb673d2b90cd9c33 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 9 Jul 2024 12:50:44 +0200 Subject: [PATCH 013/187] 
implement code review changes --- backend/pkg/commons/db/db.go | 18 +-- backend/pkg/commons/types/exporter.go | 9 ++ backend/pkg/commons/types/frontend.go | 7 +- backend/pkg/notification/db.go | 1 + backend/pkg/notification/notifications.go | 142 +++++++++++----------- 5 files changed, 88 insertions(+), 89 deletions(-) diff --git a/backend/pkg/commons/db/db.go b/backend/pkg/commons/db/db.go index f85f3641e..fe13e3e67 100644 --- a/backend/pkg/commons/db/db.go +++ b/backend/pkg/commons/db/db.go @@ -954,22 +954,8 @@ func GetTotalEligibleEther() (uint64, error) { } // GetValidatorsGotSlashed returns the validators that got slashed after `epoch` either by an attestation violation or a proposer violation -func GetValidatorsGotSlashed(epoch uint64) ([]struct { - Epoch uint64 `db:"epoch"` - SlasherIndex uint64 `db:"slasher"` - SlasherPubkey string `db:"slasher_pubkey"` - SlashedValidatorIndex uint64 `db:"slashedvalidator"` - SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` - Reason string `db:"reason"` -}, error) { - var dbResult []struct { - Epoch uint64 `db:"epoch"` - SlasherIndex uint64 `db:"slasher"` - SlasherPubkey string `db:"slasher_pubkey"` - SlashedValidatorIndex uint64 `db:"slashedvalidator"` - SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` - Reason string `db:"reason"` - } +func GetValidatorsGotSlashed(epoch uint64) ([]*types.SlashingInfo, error) { + var dbResult []*types.SlashingInfo err := ReaderDb.Select(&dbResult, ` WITH slashings AS ( diff --git a/backend/pkg/commons/types/exporter.go b/backend/pkg/commons/types/exporter.go index 5a3f3a397..197638472 100644 --- a/backend/pkg/commons/types/exporter.go +++ b/backend/pkg/commons/types/exporter.go @@ -709,3 +709,12 @@ type RedisCachedValidatorsMapping struct { Epoch Epoch Mapping []*CachedValidator } + +type SlashingInfo struct { + Epoch uint64 `db:"epoch"` + SlasherIndex uint64 `db:"slasher"` + SlasherPubkey string `db:"slasher_pubkey"` + SlashedValidatorIndex uint64 
`db:"slashedvalidator"` + SlashedValidatorPubkey []byte `db:"slashedvalidator_pubkey"` + Reason string `db:"reason"` +} diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 9c8f45c04..5c90a8be3 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -33,9 +33,10 @@ func (npui NotificationsPerUserId) AddNotification(n Notification) { if n.GetEventName() == "" { log.Fatal(fmt.Errorf("Notification event name is empty"), fmt.Sprintf("Notification: %v", n), 0) } - if n.GetEventFilter() == "" { - log.Fatal(fmt.Errorf("Notification event filter is empty"), fmt.Sprintf("Notification: %v", n), 0) - } + // next check is disabled as there are events that do not require a filter (rocketpool, network events) + // if n.GetEventFilter() == "" { + // log.Fatal(fmt.Errorf("Notification event filter is empty"), fmt.Sprintf("Notification: %v", n), 0) + // } if _, ok := npui[n.GetUserId()]; !ok { npui[n.GetUserId()] = make(map[EventName]map[EventFilter]Notification) diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index cbcb9d497..7b8948d34 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -12,6 +12,7 @@ import ( // Map key corresponds to the event filter which can be // a validator pubkey or an eth1 address (for RPL notifications) // or a list of validators for the tax report notifications +// or a machine name for machine notifications or a eth client name for ethereum client update notifications // optionally it is possible to set a filter on the last sent ts and the event filter // fields func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, lastSentFilterArgs []interface{}, eventFilters []string) (map[string][]types.Subscription, error) { diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 5ca1a1108..6b6e0ea0c 100644 --- 
a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1726,55 +1726,44 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific if err != nil { return fmt.Errorf("error getting slashed validators from database, err: %w", err) } - query := "" - resultsLen := len(dbResult) - for i, event := range dbResult { - // TODO: clarify why we need the id here?! - query += fmt.Sprintf(`SELECT %d AS ref, id, user_id, event_name from users_subscriptions where event_name = $1 AND event_filter = '%x'`, i, event.SlashedValidatorPubkey) - if i < resultsLen-1 { - query += " UNION " - } - } - - if query == "" { - return nil - } - - var subscribers []struct { - Ref uint64 `db:"ref"` - Id uint64 `db:"id"` - UserId types.UserId `db:"user_id"` - EventName types.EventName `db:"event_name"` + slashedPubkeys := make([]string, 0, len(dbResult)) + pubkeyToSlashingInfoMap := make(map[string]*types.SlashingInfo) + for _, event := range dbResult { + pubkeyStr := hex.EncodeToString(event.SlashedValidatorPubkey) + slashedPubkeys = append(slashedPubkeys, pubkeyStr) + pubkeyToSlashingInfoMap[pubkeyStr] = event } - name := string(types.ValidatorGotSlashedEventName) - if utils.Config.Chain.ClConfig.ConfigName != "" { - name = utils.Config.Chain.ClConfig.ConfigName + ":" + name - } - err = db.FrontendWriterDB.Select(&subscribers, query, name) + subscribedUsers, err := GetSubsForEventFilter(types.ValidatorGotSlashedEventName, "", nil, slashedPubkeys) if err != nil { - return fmt.Errorf("error querying subscribers, err: %w", err) + return fmt.Errorf("failed to get subs for %v: %v", types.ValidatorGotSlashedEventName, err) } - for _, sub := range subscribers { - event := dbResult[sub.Ref] + for _, subs := range subscribedUsers { + for _, sub := range subs { - log.Infof("creating %v notification for validator %v in epoch %v", event.SlashedValidatorPubkey, event.Reason, epoch) + event := pubkeyToSlashingInfoMap[sub.EventFilter] + if event == 
nil { + log.Error(fmt.Errorf("error retrieving slashing info for public key %s", sub.EventFilter), "", 0) + continue + } + log.Infof("creating %v notification for validator %v in epoch %v", event.Reason, sub.EventFilter, epoch) - n := &validatorGotSlashedNotification{ - NotificationBaseImpl: types.NotificationBaseImpl{ - SubscriptionID: sub.Id, - UserID: sub.UserId, - Epoch: event.Epoch, - EventFilter: hex.EncodeToString(event.SlashedValidatorPubkey), - EventName: sub.EventName, - }, - Slasher: event.SlasherIndex, - Reason: event.Reason, - ValidatorIndex: event.SlashedValidatorIndex, + n := &validatorGotSlashedNotification{ + NotificationBaseImpl: types.NotificationBaseImpl{ + SubscriptionID: *sub.ID, + UserID: *sub.UserID, + Epoch: epoch, + EventFilter: sub.EventFilter, + EventName: sub.EventName, + }, + Slasher: event.SlasherIndex, + Reason: event.Reason, + ValidatorIndex: event.SlashedValidatorIndex, + } + notificationsByUserID.AddNotification(n) + metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } - notificationsByUserID.AddNotification(n) - metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc() } return nil @@ -2077,28 +2066,39 @@ func collectMonitoringMachine( notifyConditionFulfilled func(subscribeData *types.Subscription, machineData *types.MachineMetricSystemUser) bool, epoch uint64, ) error { - var allSubscribed []*types.Subscription // event_filter == machine name + + dbResult, err := GetSubsForEventFilter( + eventName, + "us.created_epoch <= ? AND (us.last_sent_epoch < (? - ?) OR us.last_sent_epoch IS NULL)", + []interface{}{epoch, epoch, epochWaitInBetween}, + nil, + ) + // TODO: clarify why we need grouping here?! 
- err := db.FrontendWriterDB.Select(&allSubscribed, - `SELECT - us.user_id, - max(us.id) AS id, - ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, - event_filter, - COALESCE(event_threshold, 0) AS event_threshold - FROM users_subscriptions us - WHERE us.event_name = $1 AND us.created_epoch <= $2 - AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) - group by us.user_id, event_filter, event_threshold`, - eventName, epoch, epochWaitInBetween) + // err := db.FrontendWriterDB.Select(&allSubscribed, + // `SELECT + // us.user_id, + // max(us.id) AS id, + // ENCODE((array_agg(us.unsubscribe_hash))[1], 'hex') AS unsubscribe_hash, + // event_filter, + // COALESCE(event_threshold, 0) AS event_threshold + // FROM users_subscriptions us + // WHERE us.event_name = $1 AND us.created_epoch <= $2 + // AND (us.last_sent_epoch < ($2 - $3) OR us.last_sent_epoch IS NULL) + // group by us.user_id, event_filter, event_threshold`, + // eventName, epoch, epochWaitInBetween) if err != nil { return err } rowKeys := gcp_bigtable.RowList{} - for _, data := range allSubscribed { - rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(*data.UserID, "system", data.EventFilter)) + totalSubscribed := 0 + for _, data := range dbResult { + for _, sub := range data { + rowKeys = append(rowKeys, db.BigtableClient.GetMachineRowKey(*sub.UserID, "system", sub.EventFilter)) + totalSubscribed++ + } } machineDataOfSubscribed, err := db.BigtableClient.GetMachineMetricsForNotifications(rowKeys) @@ -2107,20 +2107,22 @@ func collectMonitoringMachine( } var result []*types.Subscription - for _, data := range allSubscribed { - localData := data // Create a local copy of the data variable - machineMap, found := machineDataOfSubscribed[*localData.UserID] - if !found { - continue - } - currentMachineData, found := machineMap[localData.EventFilter] - if !found { - continue - } + for _, data := range dbResult { + for _, sub := range data { + localData := sub // Create a 
local copy of the data variable + machineMap, found := machineDataOfSubscribed[*localData.UserID] + if !found { + continue + } + currentMachineData, found := machineMap[localData.EventFilter] + if !found { + continue + } - //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) - if notifyConditionFulfilled(localData, currentMachineData) { - result = append(result, localData) + //logrus.Infof("currentMachineData %v | %v | %v | %v", currentMachine.CurrentDataInsertTs, currentMachine.CompareDataInsertTs, currentMachine.UserID, currentMachine.Machine) + if notifyConditionFulfilled(&localData, currentMachineData) { + result = append(result, &localData) + } } } @@ -2158,7 +2160,7 @@ func collectMonitoringMachine( subRatioThreshold = subFirstRatioThreshold isFirstNotificationCheck = false } - if float64(len(result))/float64(len(allSubscribed)) >= subRatioThreshold { + if float64(len(result))/float64(totalSubscribed) >= subRatioThreshold { log.Error(nil, fmt.Errorf("error too many users would be notified concerning: %v", eventName), 0) return nil } From f1a2ef83d0f1c545354f8337402440b9cd621c5e Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 9 Jul 2024 12:53:53 +0200 Subject: [PATCH 014/187] please linter --- backend/pkg/notification/notifications.go | 1 - 1 file changed, 1 deletion(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 6b6e0ea0c..51cb926ae 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1741,7 +1741,6 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific for _, subs := range subscribedUsers { for _, sub := range subs { - event := pubkeyToSlashingInfoMap[sub.EventFilter] if event == nil { log.Error(fmt.Errorf("error retrieving slashing info for public key 
%s", sub.EventFilter), "", 0) From 274816226cd18f5dc8e1cee2bab64fe6cad92883 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 10 Jul 2024 10:29:46 +0200 Subject: [PATCH 015/187] remove internal state for notifications --- backend/pkg/commons/types/frontend.go | 12 ++++---- backend/pkg/notification/db.go | 1 - backend/pkg/notification/notifications.go | 35 ++++++++++++----------- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 5c90a8be3..767aca38b 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -341,12 +341,12 @@ type Subscription struct { LastSent *time.Time `db:"last_sent_ts"` LastEpoch *uint64 `db:"last_sent_epoch"` // Channels pq.StringArray `db:"channels"` - CreatedTime time.Time `db:"created_ts"` - CreatedEpoch uint64 `db:"created_epoch"` - EventThreshold float64 `db:"event_threshold"` - State sql.NullString `db:"internal_state" swaggertype:"string"` - GroupId *int64 - DashboardId *int64 + CreatedTime time.Time `db:"created_ts"` + CreatedEpoch uint64 `db:"created_epoch"` + EventThreshold float64 `db:"event_threshold"` + // State sql.NullString `db:"internal_state" swaggertype:"string"` + GroupId *int64 + DashboardId *int64 } type UserId uint64 diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 7b8948d34..4363c12f7 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -39,7 +39,6 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las goqu.C("last_sent_epoch"), goqu.C("created_epoch"), goqu.C("event_threshold"), - goqu.C("internal_state"), ).Where(goqu.C("event_name").Eq(utils.GetNetwork() + ":" + string(eventName))) if lastSentFilter != "" { diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 51cb926ae..76bedf688 100644 --- 
a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -492,6 +492,8 @@ func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useD } } } + + // obsolete as notifications are anyway sent on a per-epoch basis for epoch, subIDs := range subByEpoch { // update that we've queued the subscription (last sent rather means last queued) err := db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch, useDB) @@ -520,6 +522,7 @@ func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useD } } + // no need to batch here as the internal state will become obsolete for state, subs := range stateToSub { subArray := make([]int64, 0) for subID := range subs { @@ -1549,21 +1552,21 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty t := hex.EncodeToString(validator.Pubkey) subs := subMap[t] for _, sub := range subs { - if sub.State.String == "" || sub.State.String == "-" { // discard online notifications that do not have a corresponding offline notification - continue - } + // if sub.State.String == "" || sub.State.String == "-" { // discard online notifications that do not have a corresponding offline notification + // continue + // } - originalLastSeenEpoch, err := strconv.ParseUint(sub.State.String, 10, 64) - if err != nil { - // I have no idea what just happened. - return fmt.Errorf("this should never happen. couldn't parse state as uint64: %v", err) - } + // originalLastSeenEpoch, err := strconv.ParseUint(sub.State.String, 10, 64) + // if err != nil { + // // I have no idea what just happened. + // return fmt.Errorf("this should never happen. 
couldn't parse state as uint64: %v", err) + // } - epochsSinceOffline := epoch - originalLastSeenEpoch + // epochsSinceOffline := epoch - originalLastSeenEpoch - if epochsSinceOffline > epoch { // fix overflow - epochsSinceOffline = 4 - } + // if epochsSinceOffline > epoch { // fix overflow + // epochsSinceOffline = 4 + // } if sub.UserID == nil || sub.ID == nil { return fmt.Errorf("error expected userId and subId to be defined but got user: %v, sub: %v", sub.UserID, sub.ID) @@ -1582,7 +1585,6 @@ func collectAttestationAndOfflineValidatorNotifications(notificationsByUserID ty }, ValidatorIndex: validator.Index, IsOffline: false, - EpochsOffline: epochsSinceOffline, } notificationsByUserID.AddNotification(n) @@ -1597,7 +1599,6 @@ type validatorIsOfflineNotification struct { types.NotificationBaseImpl ValidatorIndex uint64 - EpochsOffline uint64 IsOffline bool } @@ -1611,9 +1612,9 @@ func (n *validatorIsOfflineNotification) GetInfo(includeUrl bool) string { } } else { if includeUrl { - return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) + return fmt.Sprintf(`Validator %[1]v is back online since epoch %[2]v.`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator %v is back online since epoch %v (was offline for %v epoch(s)).`, n.ValidatorIndex, n.Epoch, n.EpochsOffline) + return fmt.Sprintf(`Validator %v is back online since epoch %v.`, n.ValidatorIndex, n.Epoch) } } } @@ -1630,7 +1631,7 @@ func (n *validatorIsOfflineNotification) GetInfoMarkdown() string { if n.IsOffline { return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is offline since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) } else { - return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch 
[%[2]v](https://%[3]v/epoch/%[2]v) (was offline for %[4]v epoch(s)).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain, n.EpochsOffline) + return fmt.Sprintf(`Validator [%[1]v](https://%[3]v/validator/%[1]v) is back online since epoch [%[2]v](https://%[3]v/epoch/%[2]v).`, n.ValidatorIndex, n.Epoch, utils.Config.Frontend.SiteDomain) } } From d0d7a81bd04cf466af4ce7312f14bfa80a86e963 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Wed, 14 Aug 2024 16:55:39 +0200 Subject: [PATCH 016/187] Implemented service to archive dashboards --- backend/cmd/api/main.go | 1 + backend/cmd/archiver/main.go | 45 ++++++ backend/pkg/api/data_access/archiver.go | 69 ++++++++++ backend/pkg/api/data_access/data_access.go | 16 ++- backend/pkg/api/data_access/dummy.go | 12 +- backend/pkg/api/data_access/user.go | 8 +- backend/pkg/api/data_access/vdb_helpers.go | 2 +- backend/pkg/api/data_access/vdb_management.go | 21 ++- .../api/enums/validator_dashboard_enums.go | 60 ++++++++ backend/pkg/api/handlers/internal.go | 11 +- backend/pkg/api/types/archiver.go | 9 ++ backend/pkg/api/types/user.go | 2 +- backend/pkg/archiver/archiver.go | 128 ++++++++++++++++++ 13 files changed, 365 insertions(+), 19 deletions(-) create mode 100644 backend/cmd/archiver/main.go create mode 100644 backend/pkg/api/data_access/archiver.go create mode 100644 backend/pkg/api/types/archiver.go create mode 100644 backend/pkg/archiver/archiver.go diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index 1e27ecb76..70cb88ae0 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -47,6 +47,7 @@ func main() { dataAccessor = dataaccess.NewDummyService() } else { dataAccessor = dataaccess.NewDataAccessService(cfg) + dataAccessor.StartDataAccessServices() } defer dataAccessor.Close() diff --git a/backend/cmd/archiver/main.go b/backend/cmd/archiver/main.go new file mode 100644 index 000000000..1cbd1fcda --- /dev/null +++ 
b/backend/cmd/archiver/main.go @@ -0,0 +1,45 @@ +package main + +import ( + "flag" + + dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/commons/version" + + "github.com/gobitfly/beaconchain/pkg/archiver" +) + +func main() { + configPath := flag.String("config", "", "Path to the config file, if empty string defaults will be used") + versionFlag := flag.Bool("version", false, "Show version and exit") + flag.Parse() + + if *versionFlag { + log.Infof(version.Version) + log.Infof(version.GoVersion) + return + } + + cfg := &types.Config{} + err := utils.ReadConfig(cfg, *configPath) + if err != nil { + log.Fatal(err, "error reading config file", 0) + } + utils.Config = cfg + + log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": utils.Config.Chain.ClConfig.ConfigName}, "starting") + + dataAccessor := dataaccess.NewDataAccessService(cfg) + defer dataAccessor.Close() + + archiver, err := archiver.NewArchiver(dataAccessor) + if err != nil { + log.Fatal(err, "error initializing archiving service", 0) + } + go archiver.Start() + utils.WaitForCtrlC() +} diff --git a/backend/pkg/api/data_access/archiver.go b/backend/pkg/api/data_access/archiver.go new file mode 100644 index 000000000..6fdd9296b --- /dev/null +++ b/backend/pkg/api/data_access/archiver.go @@ -0,0 +1,69 @@ +package dataaccess + +import ( + "context" + "database/sql" + + t "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type ArchiverRepository interface { + GetValidatorDashboardsInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) +} + +func (d *DataAccessService) GetValidatorDashboardsInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) { + result := make(map[uint64][]t.ArchiverDashboard) + + type 
DashboardInfo struct { + Id uint64 `db:"id"` + UserId uint64 `db:"user_id"` + IsArchived sql.NullString `db:"is_archived"` + GroupCount uint64 `db:"group_count"` + ValidatorCount uint64 `db:"validator_count"` + } + + var dbReturn []DashboardInfo + err := d.readerDb.Select(&dbReturn, ` + WITH dashboards_groups AS + (SELECT + dashboard_id, + COUNT(id) AS group_count + FROM users_val_dashboards_groups + GROUP BY dashboard_id), + dashboards_validators AS + (SELECT + dashboard_id, + COUNT(validator_index) AS validator_count + FROM users_val_dashboards_validators + GROUP BY dashboard_id) + SELECT + uvd.id, + uvd.user_id, + uvd.is_archived, + COALESCE(dg.group_count, 0) AS group_count, + COALESCE(dv.validator_count, 0) AS validator_count + FROM users_val_dashboards uvd + LEFT JOIN dashboards_groups dg ON uvd.id = dg.dashboard_id + LEFT JOIN dashboards_validators dv ON uvd.id = dv.dashboard_id + `) + if err != nil { + return nil, err + } + + for _, dashboardInfo := range dbReturn { + if _, ok := result[dashboardInfo.UserId]; !ok { + result[dashboardInfo.UserId] = make([]t.ArchiverDashboard, 0) + } + + dashboard := t.ArchiverDashboard{ + DashboardId: dashboardInfo.Id, + IsArchived: dashboardInfo.IsArchived.Valid, + GroupCount: dashboardInfo.GroupCount, + ValidatorCount: dashboardInfo.ValidatorCount, + } + + result[dashboardInfo.UserId] = append(result[dashboardInfo.UserId], dashboard) + } + + return result, nil +} diff --git a/backend/pkg/api/data_access/data_access.go b/backend/pkg/api/data_access/data_access.go index 7441d860e..002eb55ba 100644 --- a/backend/pkg/api/data_access/data_access.go +++ b/backend/pkg/api/data_access/data_access.go @@ -27,7 +27,9 @@ type DataAccessor interface { NotificationsRepository AdminRepository BlockRepository + ArchiverRepository + StartDataAccessServices() Close() GetLatestFinalizedEpoch() (uint64, error) @@ -78,12 +80,6 @@ func NewDataAccessService(cfg *types.Config) *DataAccessService { db.BigtableClient = das.bigtable 
db.PersistentRedisDbClient = das.persistentRedisDbClient - // Create the services - das.services = services.NewServices(das.readerDb, das.writerDb, das.alloyReader, das.alloyWriter, das.clickhouseReader, das.bigtable, das.persistentRedisDbClient) - - // Initialize the services - das.services.InitServices() - return das } @@ -247,6 +243,14 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { return &dataAccessService } +func (d *DataAccessService) StartDataAccessServices() { + // Create the services + d.services = services.NewServices(d.readerDb, d.writerDb, d.alloyReader, d.alloyWriter, d.clickhouseReader, d.bigtable, d.persistentRedisDbClient) + + // Initialize the services + d.services.InitServices() +} + func (d *DataAccessService) Close() { if d.readerDb != nil { d.readerDb.Close() diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index fbe661645..f64fd545a 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -55,6 +55,10 @@ func commonFakeData(a interface{}) error { return faker.FakeData(a, options.WithRandomMapAndSliceMaxSize(5)) } +func (d *DummyService) StartDataAccessServices() { + // nothing to start +} + func (d *DummyService) Close() { // nothing to close } @@ -224,7 +228,7 @@ func (d *DummyService) RemoveValidatorDashboard(ctx context.Context, dashboardId return nil } -func (d *DummyService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archived bool) (*t.VDBPostArchivingReturnData, error) { +func (d *DummyService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archivedReason *enums.VDBArchivedReason) (*t.VDBPostArchivingReturnData, error) { r := t.VDBPostArchivingReturnData{} err := commonFakeData(&r) return &r, err @@ -817,3 +821,9 @@ func (d *DummyService) GetSlotBlobs(ctx context.Context, chainId, block uint64) err := commonFakeData(&r) return r, err } + +func (d *DummyService) 
GetValidatorDashboardsInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) { + r := make(map[uint64][]t.ArchiverDashboard) + err := commonFakeData(&r) + return r, err +} diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index d989ab39a..132ce25d6 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -315,7 +315,7 @@ var freeTierProduct t.PremiumProduct = t.PremiumProduct{ ProductName: "Free", PremiumPerks: t.PremiumPerks{ AdFree: false, - ValidatorDasboards: 1, + ValidatorDashboards: 1, ValidatorsPerDashboard: 20, ValidatorGroupsPerDashboard: 1, ShareCustomDashboards: false, @@ -420,7 +420,7 @@ func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSu ProductName: "Guppy", PremiumPerks: t.PremiumPerks{ AdFree: true, - ValidatorDasboards: 1, + ValidatorDashboards: 1, ValidatorsPerDashboard: 100, ValidatorGroupsPerDashboard: 3, ShareCustomDashboards: true, @@ -453,7 +453,7 @@ func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSu ProductName: "Dolphin", PremiumPerks: t.PremiumPerks{ AdFree: true, - ValidatorDasboards: 2, + ValidatorDashboards: 2, ValidatorsPerDashboard: 300, ValidatorGroupsPerDashboard: 10, ShareCustomDashboards: true, @@ -486,7 +486,7 @@ func (d *DataAccessService) GetProductSummary(ctx context.Context) (*t.ProductSu ProductName: "Orca", PremiumPerks: t.PremiumPerks{ AdFree: true, - ValidatorDasboards: 2, + ValidatorDashboards: 2, ValidatorsPerDashboard: 1000, ValidatorGroupsPerDashboard: 30, ShareCustomDashboards: true, diff --git a/backend/pkg/api/data_access/vdb_helpers.go b/backend/pkg/api/data_access/vdb_helpers.go index 8420d10a8..8b3055699 100644 --- a/backend/pkg/api/data_access/vdb_helpers.go +++ b/backend/pkg/api/data_access/vdb_helpers.go @@ -24,7 +24,7 @@ type ValidatorDashboardRepository interface { CreateValidatorDashboard(ctx context.Context, userId uint64, name string, network uint64) 
(*t.VDBPostReturnData, error) RemoveValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary) error - UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archived bool) (*t.VDBPostArchivingReturnData, error) + UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archivedReason *enums.VDBArchivedReason) (*t.VDBPostArchivingReturnData, error) UpdateValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostReturnData, error) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index c0e088ae5..aba5d57a1 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -187,9 +187,24 @@ func (d *DataAccessService) RemoveValidatorDashboard(ctx context.Context, dashbo return nil } -func (d *DataAccessService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archived bool) (*t.VDBPostArchivingReturnData, error) { - // TODO @DATA-ACCESS - return d.dummy.UpdateValidatorDashboardArchiving(ctx, dashboardId, archived) +func (d *DataAccessService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archivedReason *enums.VDBArchivedReason) (*t.VDBPostArchivingReturnData, error) { + result := &t.VDBPostArchivingReturnData{} + + var archivedReasonText *string + if archivedReason != nil { + reason := archivedReason.ToString() + archivedReasonText = &reason + } + + err := d.alloyWriter.GetContext(ctx, result, ` + UPDATE users_val_dashboards SET is_archived = $1 WHERE id = $2 + RETURNING id, is_archived IS NOT NULL AS is_archived + `, archivedReasonText, dashboardId) + if err != nil { + return nil, err + } + + return result, nil } func (d *DataAccessService) UpdateValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostReturnData, error) { diff --git 
a/backend/pkg/api/enums/validator_dashboard_enums.go b/backend/pkg/api/enums/validator_dashboard_enums.go index d0e182b83..377e105e6 100644 --- a/backend/pkg/api/enums/validator_dashboard_enums.go +++ b/backend/pkg/api/enums/validator_dashboard_enums.go @@ -270,6 +270,66 @@ var VDBManageValidatorsColumns = struct { VDBManageValidatorsWithdrawalCredential, } +// ---------------- +// Validator Dashboard Archived Reasons + +type VDBArchivedReason int + +var _ EnumFactory[VDBArchivedReason] = VDBArchivedReason(0) + +const ( + VDBArchivedUser VDBArchivedReason = iota + VDBArchivedDashboards + VDBArchivedGroups + VDBArchivedValidators +) + +func (r VDBArchivedReason) Int() int { + return int(r) +} + +func (VDBArchivedReason) NewFromString(s string) VDBArchivedReason { + switch s { + case "user": + return VDBArchivedUser + case "dashboard_limit": + return VDBArchivedDashboards + case "group_limit": + return VDBArchivedGroups + case "validator_limit": + return VDBArchivedValidators + default: + return VDBArchivedReason(-1) + } +} + +func (r VDBArchivedReason) ToString() string { + switch r { + case VDBArchivedUser: + return "user" + case VDBArchivedDashboards: + return "dashboard_limit" + case VDBArchivedGroups: + return "group_limit" + case VDBArchivedValidators: + return "validator_limit" + default: + return "" + } +} + +var VDBArchivedReasons = struct { + User VDBArchivedReason + Dashboards VDBArchivedReason + Groups VDBArchivedReason + Validators VDBArchivedReason +}{ + VDBArchivedUser, + VDBArchivedDashboards, + VDBArchivedGroups, + VDBArchivedValidators, +} + // ---------------- // Validator Reward Chart Efficiency Filter diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 1475b169b..08b0e9d51 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -351,7 +351,7 @@ func (h *HandlerService) InternalPostValidatorDashboards(w http.ResponseWriter, handleErr(w, err) return } - if 
dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards && !isUserAdmin(userInfo) { + if dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards && !isUserAdmin(userInfo) { returnConflict(w, errors.New("maximum number of validator dashboards reached")) return } @@ -482,7 +482,7 @@ func (h *HandlerService) InternalPutValidatorDashboardArchiving(w http.ResponseW handleErr(w, err) return } - if dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards && !isUserAdmin(userInfo) { + if dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards && !isUserAdmin(userInfo) { returnConflict(w, errors.New("maximum number of active validator dashboards reached")) return } @@ -496,7 +496,12 @@ func (h *HandlerService) InternalPutValidatorDashboardArchiving(w http.ResponseW } } - data, err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, req.IsArchived) + var archivedReason *enums.VDBArchivedReason + if req.IsArchived { + archivedReason = &enums.VDBArchivedReasons.User + } + + data, err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, archivedReason) if err != nil { handleErr(w, err) return diff --git a/backend/pkg/api/types/archiver.go b/backend/pkg/api/types/archiver.go new file mode 100644 index 000000000..cc30a9775 --- /dev/null +++ b/backend/pkg/api/types/archiver.go @@ -0,0 +1,9 @@ +package types + +// count indicator per block details tab; each tab is only present if count > 0 +type ArchiverDashboard struct { + DashboardId uint64 + IsArchived bool + GroupCount uint64 + ValidatorCount uint64 +} diff --git a/backend/pkg/api/types/user.go b/backend/pkg/api/types/user.go index 21ecc7bb8..04f43220b 100644 --- a/backend/pkg/api/types/user.go +++ b/backend/pkg/api/types/user.go @@ -117,7 +117,7 @@ type ExtraDashboardValidatorsPremiumAddon struct { type PremiumPerks struct { AdFree bool `json:"ad_free"` // note that this is somhow redunant, since there is already ApiPerks.NoAds - ValidatorDasboards uint64 
`json:"validator_dashboards"` + ValidatorDashboards uint64 `json:"validator_dashboards"` ValidatorsPerDashboard uint64 `json:"validators_per_dashboard"` ValidatorGroupsPerDashboard uint64 `json:"validator_groups_per_dashboard"` ShareCustomDashboards bool `json:"share_custom_dashboards"` diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go new file mode 100644 index 000000000..9b87d290f --- /dev/null +++ b/backend/pkg/archiver/archiver.go @@ -0,0 +1,128 @@ +package archiver + +import ( + "context" + "slices" + "time" + + dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + "github.com/gobitfly/beaconchain/pkg/api/enums" + t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/utils" +) + +const ( + maxArchivedDashboardsCount = 10 +) + +type Archiver struct { + das *dataaccess.DataAccessService +} + +func NewArchiver(d *dataaccess.DataAccessService) (*Archiver, error) { + archiver := &Archiver{ + das: d, + } + return archiver, nil +} + +func (a *Archiver) Start() { + for { + err := a.updateArchivedStatus() + if err != nil { + log.Error(err, "failed indexing blobs", 0) + } + time.Sleep(utils.Day) + } +} + +func (a *Archiver) updateArchivedStatus() error { + ctx := context.Background() + + // Get all dashboards for all users + userDashboards, err := a.das.GetValidatorDashboardsInfo(ctx) + if err != nil { + return err + } + + for userId, dashboards := range userDashboards { + userInfo, err := a.das.GetUserInfo(ctx, userId) + if err != nil { + return err + } + + if userInfo.UserGroup == t.UserGroupAdmin { + // Don't archive or delete anything for admins + continue + } + + var archivedDashboards []uint64 + var activeDashboards []uint64 + + type ArchivedDashboard struct { + DashboardId uint64 + ArchivedReason enums.VDBArchivedReason + } + var dashboardsToBeArchived []ArchivedDashboard + var dashboardsToBeDeleted []uint64 + + // Check if 
the active user dashboard exceeds the maximum number of groups, or validators + for _, dashboardInfo := range dashboards { + if dashboardInfo.IsArchived { + archivedDashboards = append(archivedDashboards, dashboardInfo.DashboardId) + } else { + if dashboardInfo.GroupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { + dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Groups}) + } else if dashboardInfo.ValidatorCount >= userInfo.PremiumPerks.ValidatorsPerDashboard { + dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Validators}) + } else { + activeDashboards = append(activeDashboards, dashboardInfo.DashboardId) + } + } + } + + // Check if the user still exceeds the maximum number of active dashboards + dashboardLimit := int(userInfo.PremiumPerks.ValidatorDashboards) + if len(activeDashboards) > dashboardLimit { + slices.Sort(activeDashboards) + for id := 0; id < len(activeDashboards)-dashboardLimit; id++ { + dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: activeDashboards[id], ArchivedReason: enums.VDBArchivedReasons.Dashboards}) + } + } + + // Check if the user exceeds the maximum number of archived dashboards + archivedLimit := maxArchivedDashboardsCount + if len(archivedDashboards)+len(dashboardsToBeArchived) > archivedLimit { + dashboardsToBeDeleted = archivedDashboards + for _, dashboard := range dashboardsToBeArchived { + dashboardsToBeDeleted = append(dashboardsToBeDeleted, dashboard.DashboardId) + } + slices.Sort(dashboardsToBeDeleted) + dashboardsToBeDeleted = dashboardsToBeDeleted[:len(dashboardsToBeDeleted)-archivedLimit] + } + + // Archive dashboards + dashboardsToBeDeletedMap := utils.SliceToMap(dashboardsToBeDeleted) + for _, dashboard := range dashboardsToBeArchived { + if _, ok := 
dashboardsToBeDeletedMap[dashboard.DashboardId]; ok { + // The dashboard will immediately be deleted, so no need to archive it + continue + } + _, err := a.das.UpdateValidatorDashboardArchiving(ctx, t.VDBIdPrimary(dashboard.DashboardId), &dashboard.ArchivedReason) + if err != nil { + return err + } + } + + // Delete dashboards + for _, dashboardId := range dashboardsToBeDeleted { + err := a.das.RemoveValidatorDashboard(ctx, t.VDBIdPrimary(dashboardId)) + if err != nil { + return err + } + } + } + + return nil +} From 648027f76fdad75ca96f2832ff1299c04ea2aa66 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Mon, 19 Aug 2024 14:08:52 +0200 Subject: [PATCH 017/187] added network field --- backend/pkg/api/data_access/user.go | 3 +++ backend/pkg/api/data_access/vdb_management.go | 2 ++ backend/pkg/api/types/dashboard.go | 1 + frontend/types/api/dashboard.ts | 1 + frontend/types/api/latest_state.ts | 1 + frontend/types/api/user.ts | 8 ++++++++ 6 files changed, 16 insertions(+) diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 01bc6e303..f61998885 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -587,6 +587,7 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 dbReturn := []struct { Id uint64 `db:"id"` Name string `db:"name"` + Network uint64 `db:"network"` IsArchived sql.NullString `db:"is_archived"` PublicId sql.NullString `db:"public_id"` PublicName sql.NullString `db:"public_name"` @@ -597,6 +598,7 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 SELECT uvd.id, uvd.name, + uvd.network, uvd.is_archived, uvds.public_id, uvds.name AS public_name, @@ -614,6 +616,7 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 validatorDashboardMap[row.Id] = &t.ValidatorDashboard{ Id: row.Id, Name: row.Name, + Network: row.Network, PublicIds: 
[]t.VDBPublicId{}, IsArchived: row.IsArchived.Valid, ArchivedReason: row.IsArchived.String, diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 1a28f507f..8b6b1e1b1 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -68,6 +68,7 @@ func (d *DataAccessService) GetValidatorDashboard(ctx context.Context, dashboard wg.Go(func() error { dbReturn := []struct { Name string `db:"name"` + Network string `db:"network"` IsArchived sql.NullString `db:"is_archived"` PublicId sql.NullString `db:"public_id"` PublicName sql.NullString `db:"public_name"` @@ -77,6 +78,7 @@ func (d *DataAccessService) GetValidatorDashboard(ctx context.Context, dashboard err := d.alloyReader.SelectContext(ctx, &dbReturn, ` SELECT uvd.name, + uvd.network, uvd.is_archived, uvds.public_id, uvds.name AS public_name, diff --git a/backend/pkg/api/types/dashboard.go b/backend/pkg/api/types/dashboard.go index eaf49a431..d3e334722 100644 --- a/backend/pkg/api/types/dashboard.go +++ b/backend/pkg/api/types/dashboard.go @@ -7,6 +7,7 @@ type AccountDashboard struct { type ValidatorDashboard struct { Id uint64 `json:"id"` Name string `json:"name"` + Network uint64 `json:"network"` PublicIds []VDBPublicId `json:"public_ids,omitempty"` IsArchived bool `json:"is_archived"` ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'"` diff --git a/frontend/types/api/dashboard.ts b/frontend/types/api/dashboard.ts index 3ab7c3768..16a840350 100644 --- a/frontend/types/api/dashboard.ts +++ b/frontend/types/api/dashboard.ts @@ -12,6 +12,7 @@ export interface AccountDashboard { export interface ValidatorDashboard { id: number /* uint64 */; name: string; + network: number /* uint64 */; public_ids?: VDBPublicId[]; is_archived: boolean; archived_reason?: 'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'; diff --git 
a/frontend/types/api/latest_state.ts b/frontend/types/api/latest_state.ts index 915266737..856b0dd6d 100644 --- a/frontend/types/api/latest_state.ts +++ b/frontend/types/api/latest_state.ts @@ -16,6 +16,7 @@ export interface EthConversionRate { } export interface LatestStateData { current_slot: number /* uint64 */; + finalized_epoch: number /* uint64 */; exchange_rates: EthConversionRate[]; } export type InternalGetLatestStateResponse = ApiDataResponse; diff --git a/frontend/types/api/user.ts b/frontend/types/api/user.ts index 254657919..c31a2ac76 100644 --- a/frontend/types/api/user.ts +++ b/frontend/types/api/user.ts @@ -5,6 +5,7 @@ import type { ApiDataResponse, ChartHistorySeconds } from './common' ////////// // source: user.go +export const UserGroupAdmin = "ADMIN"; export interface UserInfo { id: number /* uint64 */; email: string; @@ -126,3 +127,10 @@ export interface StripeCreateCheckoutSession { export interface StripeCustomerPortal { url: string; } +export interface OAuthAppData { + ID: number /* uint64 */; + Owner: number /* uint64 */; + AppName: string; + RedirectURI: string; + Active: boolean; +} From 43c860defdc7c6df58eeb344be9f01fbc145280a Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Mon, 19 Aug 2024 14:18:41 +0200 Subject: [PATCH 018/187] Fixed boundary off by one error --- backend/pkg/archiver/archiver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 9b87d290f..10ee99e49 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -72,9 +72,9 @@ func (a *Archiver) updateArchivedStatus() error { if dashboardInfo.IsArchived { archivedDashboards = append(archivedDashboards, dashboardInfo.DashboardId) } else { - if dashboardInfo.GroupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { + if dashboardInfo.GroupCount > userInfo.PremiumPerks.ValidatorGroupsPerDashboard { 
dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Groups}) - } else if dashboardInfo.ValidatorCount >= userInfo.PremiumPerks.ValidatorsPerDashboard { + } else if dashboardInfo.ValidatorCount > userInfo.PremiumPerks.ValidatorsPerDashboard { dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Validators}) } else { activeDashboards = append(activeDashboards, dashboardInfo.DashboardId) From 20fa8db6f466b2e7b56751a913c40b8065bfbe97 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Tue, 20 Aug 2024 15:19:31 +0200 Subject: [PATCH 019/187] Minor fixes --- backend/pkg/api/handlers/common.go | 2 +- backend/pkg/api/handlers/internal.go | 4 ++-- backend/pkg/archiver/archiver.go | 9 +++------ 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index 54ef066ab..71fbe068e 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -80,7 +80,7 @@ const ( gnosis = "gnosis" allowEmpty = true forbidEmpty = false - maxArchivedDashboardsCount = 10 + MaxArchivedDashboardsCount = 10 ) var ( diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 43557441d..c67bf9e45 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -493,12 +493,12 @@ func (h *HandlerService) InternalPutValidatorDashboardArchiving(w http.ResponseW } if !isUserAdmin(userInfo) { if req.IsArchived { - if dashboardCount >= maxArchivedDashboardsCount { + if dashboardCount >= MaxArchivedDashboardsCount { returnConflict(w, errors.New("maximum number of archived validator dashboards reached")) return } } else { - if dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards { + if 
dashboardCount >= userInfo.PremiumPerks.ValidatorDashboards { returnConflict(w, errors.New("maximum number of active validator dashboards reached")) return } diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 10ee99e49..5e9776099 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -7,15 +7,12 @@ import ( dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" "github.com/gobitfly/beaconchain/pkg/api/enums" + "github.com/gobitfly/beaconchain/pkg/api/handlers" t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/utils" ) -const ( - maxArchivedDashboardsCount = 10 -) - type Archiver struct { das *dataaccess.DataAccessService } @@ -31,7 +28,7 @@ func (a *Archiver) Start() { for { err := a.updateArchivedStatus() if err != nil { - log.Error(err, "failed indexing blobs", 0) + log.Error(err, "failed updating dashboard archive status", 0) } time.Sleep(utils.Day) } @@ -92,7 +89,7 @@ func (a *Archiver) updateArchivedStatus() error { } // Check if the user exceeds the maximum number of archived dashboards - archivedLimit := maxArchivedDashboardsCount + archivedLimit := handlers.MaxArchivedDashboardsCount if len(archivedDashboards)+len(dashboardsToBeArchived) > archivedLimit { dashboardsToBeDeleted = archivedDashboards for _, dashboard := range dashboardsToBeArchived { From 724d1b211ea856f705caca9e3fdffae92d9c75c1 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Tue, 20 Aug 2024 15:41:29 +0200 Subject: [PATCH 020/187] Fixed lint issue --- backend/pkg/archiver/archiver.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 5e9776099..22c7ac92b 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -106,7 +106,8 @@ func (a *Archiver) 
updateArchivedStatus() error { // The dashboard will immediately be deleted, so no need to archive it continue } - _, err := a.das.UpdateValidatorDashboardArchiving(ctx, t.VDBIdPrimary(dashboard.DashboardId), &dashboard.ArchivedReason) + archivedReason := dashboard.ArchivedReason + _, err := a.das.UpdateValidatorDashboardArchiving(ctx, t.VDBIdPrimary(dashboard.DashboardId), &archivedReason) if err != nil { return err } From e5c066ec6b359dafefdd861f97ac4dfd50aa7bca Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Thu, 22 Aug 2024 12:22:10 +0200 Subject: [PATCH 021/187] Grouped together the archiving and deleting of dashboards --- backend/pkg/api/data_access/archiver.go | 81 ++++++++++++++++++++++++- backend/pkg/api/data_access/dummy.go | 10 ++- backend/pkg/api/types/archiver.go | 8 ++- backend/pkg/archiver/archiver.go | 69 ++++++++++----------- 4 files changed, 130 insertions(+), 38 deletions(-) diff --git a/backend/pkg/api/data_access/archiver.go b/backend/pkg/api/data_access/archiver.go index 6fdd9296b..7c987b4d0 100644 --- a/backend/pkg/api/data_access/archiver.go +++ b/backend/pkg/api/data_access/archiver.go @@ -3,15 +3,20 @@ package dataaccess import ( "context" "database/sql" + "fmt" + "github.com/doug-martin/goqu/v9" t "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" ) type ArchiverRepository interface { - GetValidatorDashboardsInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) + GetValidatorDashboardsCountInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) + UpdateValidatorDashboardsArchiving(ctx context.Context, dashboards []t.ArchiverDashboardArchiveReason) error + RemoveValidatorDashboards(ctx context.Context, dashboardIds []uint64) error } -func (d *DataAccessService) GetValidatorDashboardsInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) { +func (d *DataAccessService) GetValidatorDashboardsCountInfo(ctx 
context.Context) (map[uint64][]t.ArchiverDashboard, error) { result := make(map[uint64][]t.ArchiverDashboard) type DashboardInfo struct { @@ -67,3 +72,75 @@ func (d *DataAccessService) GetValidatorDashboardsInfo(ctx context.Context) (map return result, nil } + +func (d *DataAccessService) UpdateValidatorDashboardsArchiving(ctx context.Context, dashboards []t.ArchiverDashboardArchiveReason) error { + ds := goqu.Dialect("postgres").Update("users_val_dashboards") + + cases := goqu.Case() + for _, dashboard := range dashboards { + cases = cases.When(goqu.I("id").Eq(dashboard.DashboardId), dashboard.ArchivedReason.ToString()) + } + + ds = ds.Set(goqu.Record{"is_archived": cases}) + + // Restrict the query to the ids we want to set + ids := make([]interface{}, len(dashboards)) + for i, dashboard := range dashboards { + ids[i] = dashboard.DashboardId + } + ds = ds.Where(goqu.I("id").In(ids...)) + + query, args, err := ds.Prepared(true).ToSQL() + if err != nil { + return fmt.Errorf("error preparing query: %v", err) + } + + _, err = d.writerDb.ExecContext(ctx, query, args...) 
+ return err +} + +func (d *DataAccessService) RemoveValidatorDashboards(ctx context.Context, dashboardIds []uint64) error { + tx, err := d.alloyWriter.BeginTxx(ctx, nil) + if err != nil { + return fmt.Errorf("error starting db transactions to remove validator dashboards: %w", err) + } + defer utils.Rollback(tx) + + // Delete the dashboard + _, err = tx.ExecContext(ctx, ` + DELETE FROM users_val_dashboards WHERE id = ANY($1) + `, dashboardIds) + if err != nil { + return err + } + + // Delete all groups for the dashboard + _, err = tx.ExecContext(ctx, ` + DELETE FROM users_val_dashboards_groups WHERE dashboard_id = ANY($1) + `, dashboardIds) + if err != nil { + return err + } + + // Delete all validators for the dashboard + _, err = tx.ExecContext(ctx, ` + DELETE FROM users_val_dashboards_validators WHERE dashboard_id = ANY($1) + `, dashboardIds) + if err != nil { + return err + } + + // Delete all shared dashboards for the dashboard + _, err = tx.ExecContext(ctx, ` + DELETE FROM users_val_dashboards_sharing WHERE dashboard_id = ANY($1) + `, dashboardIds) + if err != nil { + return err + } + + err = tx.Commit() + if err != nil { + return fmt.Errorf("error committing tx to remove validator dashboards: %w", err) + } + return nil +} diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index e056928b2..eaf8c8879 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -252,12 +252,20 @@ func (d *DummyService) RemoveValidatorDashboard(ctx context.Context, dashboardId return nil } +func (d *DummyService) RemoveValidatorDashboards(ctx context.Context, dashboardIds []uint64) error { + return nil +} + func (d *DummyService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archivedReason *enums.VDBArchivedReason) (*t.VDBPostArchivingReturnData, error) { r := t.VDBPostArchivingReturnData{} err := commonFakeData(&r) return &r, err } +func (d *DummyService) 
UpdateValidatorDashboardsArchiving(ctx context.Context, dashboards []t.ArchiverDashboardArchiveReason) error { + return nil +} + func (d *DummyService) UpdateValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostReturnData, error) { r := t.VDBPostReturnData{} err := commonFakeData(&r) @@ -846,7 +854,7 @@ func (d *DummyService) GetSlotBlobs(ctx context.Context, chainId, block uint64) return r, err } -func (d *DummyService) GetValidatorDashboardsInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) { +func (d *DummyService) GetValidatorDashboardsCountInfo(ctx context.Context) (map[uint64][]t.ArchiverDashboard, error) { r := make(map[uint64][]t.ArchiverDashboard) err := commonFakeData(&r) return r, err diff --git a/backend/pkg/api/types/archiver.go b/backend/pkg/api/types/archiver.go index cc30a9775..ac1666e4a 100644 --- a/backend/pkg/api/types/archiver.go +++ b/backend/pkg/api/types/archiver.go @@ -1,9 +1,15 @@ package types -// count indicator per block details tab; each tab is only present if count > 0 +import "github.com/gobitfly/beaconchain/pkg/api/enums" + type ArchiverDashboard struct { DashboardId uint64 IsArchived bool GroupCount uint64 ValidatorCount uint64 } + +type ArchiverDashboardArchiveReason struct { + DashboardId uint64 + ArchivedReason enums.VDBArchivedReason +} diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 22c7ac92b..1d0be6407 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -35,10 +35,14 @@ func (a *Archiver) Start() { } func (a *Archiver) updateArchivedStatus() error { - ctx := context.Background() + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + + var dashboardsToBeArchived []t.ArchiverDashboardArchiveReason + var dashboardsToBeDeleted []uint64 // Get all dashboards for all users - userDashboards, err := a.das.GetValidatorDashboardsInfo(ctx) + userDashboards, err := 
a.das.GetValidatorDashboardsCountInfo(ctx) if err != nil { return err } @@ -57,22 +61,15 @@ func (a *Archiver) updateArchivedStatus() error { var archivedDashboards []uint64 var activeDashboards []uint64 - type ArchivedDashboard struct { - DashboardId uint64 - ArchivedReason enums.VDBArchivedReason - } - var dashboardsToBeArchived []ArchivedDashboard - var dashboardsToBeDeleted []uint64 - // Check if the active user dashboard exceeds the maximum number of groups, or validators for _, dashboardInfo := range dashboards { if dashboardInfo.IsArchived { archivedDashboards = append(archivedDashboards, dashboardInfo.DashboardId) } else { if dashboardInfo.GroupCount > userInfo.PremiumPerks.ValidatorGroupsPerDashboard { - dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Groups}) + dashboardsToBeArchived = append(dashboardsToBeArchived, t.ArchiverDashboardArchiveReason{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Groups}) } else if dashboardInfo.ValidatorCount > userInfo.PremiumPerks.ValidatorsPerDashboard { - dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Validators}) + dashboardsToBeArchived = append(dashboardsToBeArchived, t.ArchiverDashboardArchiveReason{DashboardId: dashboardInfo.DashboardId, ArchivedReason: enums.VDBArchivedReasons.Validators}) } else { activeDashboards = append(activeDashboards, dashboardInfo.DashboardId) } @@ -84,41 +81,45 @@ func (a *Archiver) updateArchivedStatus() error { if len(activeDashboards) > dashboardLimit { slices.Sort(activeDashboards) for id := 0; id < len(activeDashboards)-dashboardLimit; id++ { - dashboardsToBeArchived = append(dashboardsToBeArchived, ArchivedDashboard{DashboardId: activeDashboards[id], ArchivedReason: enums.VDBArchivedReasons.Dashboards}) + dashboardsToBeArchived = 
append(dashboardsToBeArchived, t.ArchiverDashboardArchiveReason{DashboardId: activeDashboards[id], ArchivedReason: enums.VDBArchivedReasons.Dashboards}) } } // Check if the user exceeds the maximum number of archived dashboards archivedLimit := handlers.MaxArchivedDashboardsCount if len(archivedDashboards)+len(dashboardsToBeArchived) > archivedLimit { - dashboardsToBeDeleted = archivedDashboards + dashboardsToBeDeletedForUser := archivedDashboards for _, dashboard := range dashboardsToBeArchived { - dashboardsToBeDeleted = append(dashboardsToBeDeleted, dashboard.DashboardId) + dashboardsToBeDeletedForUser = append(dashboardsToBeDeletedForUser, dashboard.DashboardId) } - slices.Sort(dashboardsToBeDeleted) - dashboardsToBeDeleted = dashboardsToBeDeleted[:len(dashboardsToBeDeleted)-archivedLimit] + slices.Sort(dashboardsToBeDeletedForUser) + dashboardsToBeDeletedForUser = dashboardsToBeDeletedForUser[:len(dashboardsToBeDeletedForUser)-archivedLimit] + dashboardsToBeDeleted = append(dashboardsToBeDeleted, dashboardsToBeDeletedForUser...) } + } - // Archive dashboards - dashboardsToBeDeletedMap := utils.SliceToMap(dashboardsToBeDeleted) - for _, dashboard := range dashboardsToBeArchived { - if _, ok := dashboardsToBeDeletedMap[dashboard.DashboardId]; ok { - // The dashboard will immediately be deleted, so no need to archive it - continue - } - archivedReason := dashboard.ArchivedReason - _, err := a.das.UpdateValidatorDashboardArchiving(ctx, t.VDBIdPrimary(dashboard.DashboardId), &archivedReason) - if err != nil { - return err - } + // Remove dashboards that should be deleted from the to be archived list + dashboardsToBeDeletedMap := utils.SliceToMap(dashboardsToBeDeleted) + for i := 0; i < len(dashboardsToBeArchived); i++ { + if _, ok := dashboardsToBeDeletedMap[dashboardsToBeArchived[i].DashboardId]; ok { + dashboardsToBeArchived = append(dashboardsToBeArchived[:i], dashboardsToBeArchived[i+1:]...) 
+ i-- } + } - // Delete dashboards - for _, dashboardId := range dashboardsToBeDeleted { - err := a.das.RemoveValidatorDashboard(ctx, t.VDBIdPrimary(dashboardId)) - if err != nil { - return err - } + // Archive dashboards + if len(dashboardsToBeArchived) > 0 { + err = a.das.UpdateValidatorDashboardsArchiving(ctx, dashboardsToBeArchived) + if err != nil { + return err + } + } + + // Delete dashboards + if len(dashboardsToBeDeleted) > 0 { + err = a.das.RemoveValidatorDashboards(ctx, dashboardsToBeDeleted) + if err != nil { + return err } } From dfc69f7675fe34e566d312507c08b392be90d2fe Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:32:48 +0200 Subject: [PATCH 022/187] Adjusted for single binary handling --- backend/cmd/archiver/main.go | 12 +++++++----- backend/cmd/main.go | 3 +++ backend/pkg/archiver/archiver.go | 2 +- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/backend/cmd/archiver/main.go b/backend/cmd/archiver/main.go index 1cbd1fcda..e82ecf90c 100644 --- a/backend/cmd/archiver/main.go +++ b/backend/cmd/archiver/main.go @@ -1,7 +1,8 @@ -package main +package archiver import ( "flag" + "os" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" @@ -13,10 +14,11 @@ import ( "github.com/gobitfly/beaconchain/pkg/archiver" ) -func main() { - configPath := flag.String("config", "", "Path to the config file, if empty string defaults will be used") - versionFlag := flag.Bool("version", false, "Show version and exit") - flag.Parse() +func Run() { + fs := flag.NewFlagSet("fs", flag.ExitOnError) + configPath := fs.String("config", "", "Path to the config file, if empty string defaults will be used") + versionFlag := fs.Bool("version", false, "Show version and exit") + _ = fs.Parse(os.Args[2:]) if *versionFlag { log.Infof(version.Version) diff --git a/backend/cmd/main.go b/backend/cmd/main.go index 18dcc85d8..813939263 100644 --- a/backend/cmd/main.go +++ b/backend/cmd/main.go @@ 
-5,6 +5,7 @@ import ( "os" "github.com/gobitfly/beaconchain/cmd/api" + "github.com/gobitfly/beaconchain/cmd/archiver" "github.com/gobitfly/beaconchain/cmd/blobindexer" "github.com/gobitfly/beaconchain/cmd/eth1indexer" "github.com/gobitfly/beaconchain/cmd/ethstore_exporter" @@ -32,6 +33,8 @@ func main() { switch target { case "api": api.Run() + case "archiver": + archiver.Run() case "blobindexer": blobindexer.Run() case "eth1indexer": diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 1d0be6407..0601ef6ac 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -35,7 +35,7 @@ func (a *Archiver) Start() { } func (a *Archiver) updateArchivedStatus() error { - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*10) defer cancel() var dashboardsToBeArchived []t.ArchiverDashboardArchiveReason From ab4329615bd852709370e946e3702694094754e8 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Fri, 23 Aug 2024 09:44:35 +0200 Subject: [PATCH 023/187] Fixed linter issue --- backend/cmd/archiver/main.go | 4 ++-- backend/pkg/archiver/archiver.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/backend/cmd/archiver/main.go b/backend/cmd/archiver/main.go index e82ecf90c..51efe8336 100644 --- a/backend/cmd/archiver/main.go +++ b/backend/cmd/archiver/main.go @@ -21,8 +21,8 @@ func Run() { _ = fs.Parse(os.Args[2:]) if *versionFlag { - log.Infof(version.Version) - log.Infof(version.GoVersion) + log.Info(version.Version) + log.Info(version.GoVersion) return } diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 0601ef6ac..5c740a2c7 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -48,6 +48,7 @@ func (a *Archiver) updateArchivedStatus() error { } for userId, dashboards := range userDashboards { + // 
TODO: For better performance there should exist a method to get all user info at once userInfo, err := a.das.GetUserInfo(ctx, userId) if err != nil { return err From 9d24250f8c109a9ef7bf60e8097c8222951e021c Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Thu, 29 Aug 2024 10:37:01 +0200 Subject: [PATCH 024/187] changed validators to body param --- backend/pkg/api/handlers/internal.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index e5583477c..89fbd85da 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -824,12 +824,17 @@ func (h *HandlerService) InternalDeleteValidatorDashboardValidators(w http.Respo dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) var indices []uint64 var publicKeys []string - if validatorsParam := r.URL.Query().Get("validators"); validatorsParam != "" { - indices, publicKeys = v.checkValidatorList(validatorsParam, allowEmpty) - if v.hasErrors() { - handleErr(w, r, v) - return - } + req := struct { + Validators []intOrString `json:"validators"` + }{} + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + indices, publicKeys = v.checkValidators(req.Validators, allowEmpty) + if v.hasErrors() { + handleErr(w, r, v) + return } validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) if err != nil { From 5f91768ce57372b083fd48fdab8bdf344baa0d73 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Thu, 29 Aug 2024 10:40:20 +0200 Subject: [PATCH 025/187] public endpoint --- backend/pkg/api/handlers/public.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 2251677a4..39c0cf719 100644 --- a/backend/pkg/api/handlers/public.go +++ 
b/backend/pkg/api/handlers/public.go @@ -270,12 +270,17 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) var indices []uint64 var publicKeys []string - if validatorsParam := r.URL.Query().Get("validators"); validatorsParam != "" { - indices, publicKeys = v.checkValidatorList(validatorsParam, allowEmpty) - if v.hasErrors() { - handleErr(w, r, v) - return - } + req := struct { + Validators []intOrString `json:"validators"` + }{} + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + indices, publicKeys = v.checkValidators(req.Validators, allowEmpty) + if v.hasErrors() { + handleErr(w, r, v) + return } validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) if err != nil { From c0c021e1e6c39fcbc55f08b7654c0dd1696f3ae3 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Thu, 29 Aug 2024 13:10:40 +0200 Subject: [PATCH 026/187] efficient slice handling --- backend/pkg/archiver/archiver.go | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/backend/pkg/archiver/archiver.go b/backend/pkg/archiver/archiver.go index 5c740a2c7..6072ef20e 100644 --- a/backend/pkg/archiver/archiver.go +++ b/backend/pkg/archiver/archiver.go @@ -101,12 +101,17 @@ func (a *Archiver) updateArchivedStatus() error { // Remove dashboards that should be deleted from the to be archived list dashboardsToBeDeletedMap := utils.SliceToMap(dashboardsToBeDeleted) - for i := 0; i < len(dashboardsToBeArchived); i++ { + n := len(dashboardsToBeArchived) + for i := 0; i < n; { if _, ok := dashboardsToBeDeletedMap[dashboardsToBeArchived[i].DashboardId]; ok { - dashboardsToBeArchived = append(dashboardsToBeArchived[:i], dashboardsToBeArchived[i+1:]...) 
- i-- + // Remove the element by shifting the last element to the current index + dashboardsToBeArchived[i] = dashboardsToBeArchived[n-1] + n-- + } else { + i++ } } + dashboardsToBeArchived = dashboardsToBeArchived[:n] // Archive dashboards if len(dashboardsToBeArchived) > 0 { From eb39ef91f6481a29f19d967bd76d375abf076983 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Fri, 30 Aug 2024 10:54:56 +0200 Subject: [PATCH 027/187] added network to /validator-dashboards/id --- backend/pkg/api/data_access/vdb_management.go | 17 ++++++++++++++++- backend/pkg/api/types/validator_dashboard.go | 1 + frontend/types/api/validator_dashboard.ts | 1 + 3 files changed, 18 insertions(+), 1 deletion(-) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 0e31a32ec..59e5d2801 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -67,7 +67,7 @@ func (d *DataAccessService) GetValidatorDashboardInfo(ctx context.Context, dashb wg.Go(func() error { dbReturn := []struct { Name string `db:"name"` - Network string `db:"network"` + Network uint64 `db:"network"` IsArchived sql.NullString `db:"is_archived"` PublicId sql.NullString `db:"public_id"` PublicName sql.NullString `db:"public_name"` @@ -97,6 +97,7 @@ func (d *DataAccessService) GetValidatorDashboardInfo(ctx context.Context, dashb mutex.Lock() result.Id = uint64(dashboardId) result.Name = dbReturn[0].Name + result.Network = dbReturn[0].Network result.IsArchived = dbReturn[0].IsArchived.Valid result.ArchivedReason = dbReturn[0].IsArchived.String @@ -315,6 +316,20 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d data := t.VDBOverviewData{} wg := errgroup.Group{} var err error + + // Network + if dashboardId.Validators == nil { + wg.Go(func() error { + query := `SELECT network + FROM + users_val_dashboards + WHERE + id = $1` + return 
d.alloyReader.GetContext(ctx, &data.Network, query, dashboardId.Id) + }) + } + // TODO handle network of validator set dashboards + // Groups if dashboardId.Validators == nil && !dashboardId.AggregateGroups { // should have valid primary id diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go index c24781631..c2b223e9d 100644 --- a/backend/pkg/api/types/validator_dashboard.go +++ b/backend/pkg/api/types/validator_dashboard.go @@ -28,6 +28,7 @@ type VDBOverviewBalances struct { type VDBOverviewData struct { Name string `json:"name,omitempty"` + Network uint64 `json:"network"` Groups []VDBOverviewGroup `json:"groups"` Validators VDBOverviewValidators `json:"validators"` Efficiency PeriodicValues[float64] `json:"efficiency"` diff --git a/frontend/types/api/validator_dashboard.ts b/frontend/types/api/validator_dashboard.ts index 4158ecc0c..edb25349d 100644 --- a/frontend/types/api/validator_dashboard.ts +++ b/frontend/types/api/validator_dashboard.ts @@ -28,6 +28,7 @@ export interface VDBOverviewBalances { } export interface VDBOverviewData { name?: string; + network: string; groups: VDBOverviewGroup[]; validators: VDBOverviewValidators; efficiency: PeriodicValues; From 9ee2874216fe3cd92970abacd8fd5a3a7ef3363c Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Mon, 2 Sep 2024 10:43:08 +0200 Subject: [PATCH 028/187] BEDS-382: api: switch from unsafe.Pointer to atomic.Pointer --- .../service_average_network_efficiency.go | 11 +++++------ backend/pkg/api/services/service_slot_viz.go | 16 +++++++--------- .../api/services/service_validator_mapping.go | 11 +++++------ 3 files changed, 17 insertions(+), 21 deletions(-) diff --git a/backend/pkg/api/services/service_average_network_efficiency.go b/backend/pkg/api/services/service_average_network_efficiency.go index 02ee3a98b..725ff0511 100644 --- a/backend/pkg/api/services/service_average_network_efficiency.go +++ 
b/backend/pkg/api/services/service_average_network_efficiency.go @@ -6,7 +6,6 @@ import ( "sync" "sync/atomic" "time" - "unsafe" "github.com/doug-martin/goqu/v9" "github.com/gobitfly/beaconchain/pkg/api/enums" @@ -21,7 +20,7 @@ import ( // TODO: As a service this will not scale well as it is running once on every instance of the api. // Instead of service this should be moved to the exporter. -var currentEfficiencyInfo unsafe.Pointer +var currentEfficiencyInfo atomic.Pointer[EfficiencyData] func (s *Services) startEfficiencyDataService() { for { @@ -129,10 +128,10 @@ func (s *Services) updateEfficiencyData() error { } // update currentEfficiencyInfo - if currentEfficiencyInfo == nil { // info on first iteration + if currentEfficiencyInfo.Load() == nil { // info on first iteration log.Infof("== average network efficiency data updater initialized ==") } - atomic.StorePointer(¤tEfficiencyInfo, unsafe.Pointer(efficiencyInfo)) + currentEfficiencyInfo.Store(efficiencyInfo) return nil } @@ -140,11 +139,11 @@ func (s *Services) updateEfficiencyData() error { // GetCurrentEfficiencyInfo returns the current efficiency info and a function to release the lock // Call release lock after you are done with accessing the data, otherwise it will block the efficiency service from updating func (s *Services) GetCurrentEfficiencyInfo() (*EfficiencyData, error) { - if currentEfficiencyInfo == nil { + if currentEfficiencyInfo.Load() == nil { return nil, fmt.Errorf("%w: efficiencyInfo", ErrWaiting) } - return (*EfficiencyData)(atomic.LoadPointer(¤tEfficiencyInfo)), nil + return currentEfficiencyInfo.Load(), nil } func (s *Services) initEfficiencyInfo() *EfficiencyData { diff --git a/backend/pkg/api/services/service_slot_viz.go b/backend/pkg/api/services/service_slot_viz.go index 13445a9d1..e26145445 100644 --- a/backend/pkg/api/services/service_slot_viz.go +++ b/backend/pkg/api/services/service_slot_viz.go @@ -10,7 +10,6 @@ import ( "sync" "sync/atomic" "time" - "unsafe" 
"github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" @@ -26,7 +25,7 @@ import ( "golang.org/x/sync/errgroup" ) -var currentDutiesInfo unsafe.Pointer +var currentDutiesInfo atomic.Pointer[SyncData] func (s *Services) startSlotVizDataService() { for { @@ -47,7 +46,7 @@ func (s *Services) startSlotVizDataService() { func (s *Services) updateSlotVizData() error { var dutiesInfo *SyncData - if currentDutiesInfo == nil { + if currentDutiesInfo.Load() == nil { dutiesInfo = s.initDutiesInfo() } else { dutiesInfo = s.copyAndCleanDutiesInfo() @@ -86,7 +85,7 @@ func (s *Services) updateSlotVizData() error { // if we have fetched epoch assignments before // dont load for this epoch again - if v := (*SyncData)(atomic.LoadPointer(¤tDutiesInfo)); v != nil { + if v := currentDutiesInfo.Load(); v != nil { if v.AssignmentsFetchedForEpoch > 0 { minEpoch = v.AssignmentsFetchedForEpoch + 1 } @@ -272,11 +271,11 @@ func (s *Services) updateSlotVizData() error { log.Debugf("process slotduties extra data: %s", time.Since(startTime)) // update currentDutiesInfo and hence frontend data - if currentDutiesInfo == nil { // info on first iteration + if currentDutiesInfo.Load() == nil { // info on first iteration log.Infof("== slot-viz data updater initialized ==") } - atomic.StorePointer(¤tDutiesInfo, unsafe.Pointer(dutiesInfo)) + currentDutiesInfo.Store(dutiesInfo) return nil } @@ -284,11 +283,10 @@ func (s *Services) updateSlotVizData() error { // GetCurrentDutiesInfo returns the current duties info and a function to release the lock // Call release lock after you are done with accessing the data, otherwise it will block the slot viz service from updating func (s *Services) GetCurrentDutiesInfo() (*SyncData, error) { - if currentDutiesInfo == nil { + if currentDutiesInfo.Load() == nil { return nil, fmt.Errorf("%w: dutiesInfo", ErrWaiting) } - - return (*SyncData)(atomic.LoadPointer(¤tDutiesInfo)), nil + return currentDutiesInfo.Load(), nil } func 
(s *Services) initDutiesInfo() *SyncData { diff --git a/backend/pkg/api/services/service_validator_mapping.go b/backend/pkg/api/services/service_validator_mapping.go index 542f2a2be..374c6906a 100644 --- a/backend/pkg/api/services/service_validator_mapping.go +++ b/backend/pkg/api/services/service_validator_mapping.go @@ -7,7 +7,6 @@ import ( "fmt" "sync/atomic" "time" - "unsafe" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/gobitfly/beaconchain/pkg/commons/cache" @@ -27,7 +26,7 @@ type ValidatorMapping struct { ValidatorMetadata []*types.CachedValidator // note: why pointers? } -var currentValidatorMapping unsafe.Pointer +var currentValidatorMapping atomic.Pointer[ValidatorMapping] var _cachedBufferCompressed = new(bytes.Buffer) var _cachedBufferDecompressed = new(bytes.Buffer) var _cachedRedisValidatorMapping = new(types.RedisCachedValidatorsMapping) @@ -43,7 +42,7 @@ func (s *Services) startIndexMappingService() { r := services.NewStatusReport("api_service_validator_mapping", constants.Default, delay) r(constants.Running, nil) latestEpoch := cache.LatestEpoch.Get() - if currentValidatorMapping == nil || latestEpoch != lastEpochUpdate { + if currentValidatorMapping.Load() == nil || latestEpoch != lastEpochUpdate { err = s.updateValidatorMapping() } if err != nil { @@ -79,7 +78,7 @@ func (s *Services) initValidatorMapping() { c.ValidatorPubkeys[i] = b c.ValidatorIndices[b] = j } - atomic.StorePointer(¤tValidatorMapping, unsafe.Pointer(&c)) + currentValidatorMapping.Store(&c) } func (s *Services) updateValidatorMapping() error { @@ -129,10 +128,10 @@ func (s *Services) updateValidatorMapping() error { // Call release lock after you are done with accessing the data, otherwise it will block the validator mapping service from updating func (s *Services) GetCurrentValidatorMapping() (*ValidatorMapping, error) { // in theory the consumer can just check if the pointer is nil, but this is more explicit - if currentValidatorMapping == nil { + if 
currentValidatorMapping.Load() == nil { return nil, fmt.Errorf("%w: validator mapping", ErrWaiting) } - return (*ValidatorMapping)(atomic.LoadPointer(¤tValidatorMapping)), nil + return currentValidatorMapping.Load(), nil } func (s *Services) GetPubkeySliceFromIndexSlice(indices []constypes.ValidatorIndex) ([]string, error) { From 3fba633dc998fc46b3534f9a4aca970bfb810243 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Mon, 2 Sep 2024 10:58:14 +0200 Subject: [PATCH 029/187] BEDS-99: monitoring: change log level to trace --- backend/pkg/monitoring/services/clickhouse_epoch.go | 2 +- backend/pkg/monitoring/services/clickhouse_rollings.go | 2 +- backend/pkg/monitoring/services/db_connections.go | 2 +- backend/pkg/monitoring/services/timeout_detector.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/pkg/monitoring/services/clickhouse_epoch.go b/backend/pkg/monitoring/services/clickhouse_epoch.go index 9293bbbd1..ea3ee344f 100644 --- a/backend/pkg/monitoring/services/clickhouse_epoch.go +++ b/backend/pkg/monitoring/services/clickhouse_epoch.go @@ -44,7 +44,7 @@ func (s *ServiceClickhouseEpoch) runChecks() { // ignore return } - log.Debugf("checking clickhouse epoch") + log.Tracef("checking clickhouse epoch") // context with deadline ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) defer cancel() diff --git a/backend/pkg/monitoring/services/clickhouse_rollings.go b/backend/pkg/monitoring/services/clickhouse_rollings.go index 703ce1dd8..52fbd9f6b 100644 --- a/backend/pkg/monitoring/services/clickhouse_rollings.go +++ b/backend/pkg/monitoring/services/clickhouse_rollings.go @@ -62,7 +62,7 @@ func (s *ServiceClickhouseRollings) runChecks() { // ignore return } - log.Debugf("checking clickhouse rolling %s", rolling) + log.Tracef("checking clickhouse rolling %s", rolling) // context with deadline ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) defer cancel() diff --git 
a/backend/pkg/monitoring/services/db_connections.go b/backend/pkg/monitoring/services/db_connections.go index a071ea509..582618633 100644 --- a/backend/pkg/monitoring/services/db_connections.go +++ b/backend/pkg/monitoring/services/db_connections.go @@ -85,7 +85,7 @@ func (s *ServerDbConnections) checkDBConnections() { wg.Add(1) go func(entry *Entry) { defer wg.Done() - log.Debugf("checking db connection for %s", entry.ID) + log.Tracef("checking db connection for %s", entry.ID) // context with deadline ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) defer cancel() diff --git a/backend/pkg/monitoring/services/timeout_detector.go b/backend/pkg/monitoring/services/timeout_detector.go index 4ae97b258..a485c28d0 100644 --- a/backend/pkg/monitoring/services/timeout_detector.go +++ b/backend/pkg/monitoring/services/timeout_detector.go @@ -46,7 +46,7 @@ func (s *ServiceTimeoutDetector) runChecks() { // ignore return } - log.Debugf("checking services timeouts") + log.Tracef("checking services timeouts") query := ` with active_reports as ( From 9342701b41b3bd5b2858469ba1fa4823db1252dd Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Mon, 2 Sep 2024 13:46:32 +0200 Subject: [PATCH 030/187] BEDS-99: metrics: disable aggresive pprof (#813) * BEDS-99: metrics: disable aggresive pprof * BEDS-99: metrics: hide extra pprof behind flag --- backend/cmd/api/main.go | 2 +- backend/cmd/blobindexer/main.go | 2 +- backend/cmd/eth1indexer/main.go | 2 +- backend/cmd/ethstore_exporter/main.go | 2 +- backend/cmd/exporter/main.go | 2 +- backend/cmd/node_jobs_processor/main.go | 2 +- backend/cmd/notification_collector/main.go | 2 +- backend/cmd/notification_sender/main.go | 2 +- backend/cmd/rewards_exporter/main.go | 2 +- backend/cmd/signatures/main.go | 2 +- backend/cmd/statistics/main.go | 2 +- backend/cmd/user_service/main.go | 2 +- backend/pkg/commons/metrics/metrics.go | 11 +++++++---- backend/pkg/commons/types/config.go | 7 
++++--- 14 files changed, 23 insertions(+), 19 deletions(-) diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index bcc36a377..1a56b92d1 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -70,7 +70,7 @@ func Run() { router.Use(metrics.HttpMiddleware) go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/blobindexer/main.go b/backend/cmd/blobindexer/main.go index af49e3f5c..9f7d37b4a 100644 --- a/backend/cmd/blobindexer/main.go +++ b/backend/cmd/blobindexer/main.go @@ -31,7 +31,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/eth1indexer/main.go b/backend/cmd/eth1indexer/main.go index a88e1edd5..badcc7745 100644 --- a/backend/cmd/eth1indexer/main.go +++ b/backend/cmd/eth1indexer/main.go @@ -96,7 +96,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/ethstore_exporter/main.go b/backend/cmd/ethstore_exporter/main.go index 5a189a36d..502f91e2e 100644 --- 
a/backend/cmd/ethstore_exporter/main.go +++ b/backend/cmd/ethstore_exporter/main.go @@ -55,7 +55,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/exporter/main.go b/backend/cmd/exporter/main.go index 4f80cf773..2bf0a014e 100644 --- a/backend/cmd/exporter/main.go +++ b/backend/cmd/exporter/main.go @@ -46,7 +46,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/node_jobs_processor/main.go b/backend/cmd/node_jobs_processor/main.go index 04330e80b..427a1d97b 100644 --- a/backend/cmd/node_jobs_processor/main.go +++ b/backend/cmd/node_jobs_processor/main.go @@ -42,7 +42,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/notification_collector/main.go b/backend/cmd/notification_collector/main.go index 29d2d6a7f..428bc6588 100644 --- a/backend/cmd/notification_collector/main.go +++ b/backend/cmd/notification_collector/main.go @@ -55,7 +55,7 @@ func 
Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/notification_sender/main.go b/backend/cmd/notification_sender/main.go index 4932e1345..e78508597 100644 --- a/backend/cmd/notification_sender/main.go +++ b/backend/cmd/notification_sender/main.go @@ -53,7 +53,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/rewards_exporter/main.go b/backend/cmd/rewards_exporter/main.go index 11894cfe8..874a33558 100644 --- a/backend/cmd/rewards_exporter/main.go +++ b/backend/cmd/rewards_exporter/main.go @@ -58,7 +58,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/signatures/main.go b/backend/cmd/signatures/main.go index f663db7aa..6eb7c216d 100644 --- a/backend/cmd/signatures/main.go +++ b/backend/cmd/signatures/main.go @@ -53,7 +53,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if 
err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/statistics/main.go b/backend/cmd/statistics/main.go index 708b61adc..17f22e364 100644 --- a/backend/cmd/statistics/main.go +++ b/backend/cmd/statistics/main.go @@ -76,7 +76,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/cmd/user_service/main.go b/backend/cmd/user_service/main.go index 807b4c058..79ba47060 100644 --- a/backend/cmd/user_service/main.go +++ b/backend/cmd/user_service/main.go @@ -46,7 +46,7 @@ func Run() { if utils.Config.Metrics.Enabled { go func() { log.Infof("serving metrics on %v", utils.Config.Metrics.Address) - if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof); err != nil { + if err := metrics.Serve(utils.Config.Metrics.Address, utils.Config.Metrics.Pprof, utils.Config.Metrics.PprofExtra); err != nil { log.Fatal(err, "error serving metrics", 0) } }() diff --git a/backend/pkg/commons/metrics/metrics.go b/backend/pkg/commons/metrics/metrics.go index 7e65d28fd..2470124bb 100644 --- a/backend/pkg/commons/metrics/metrics.go +++ b/backend/pkg/commons/metrics/metrics.go @@ -129,7 +129,7 @@ func (r *responseWriterDelegator) Write(b []byte) (int, error) { } // Serve serves prometheus metrics on the given address under /metrics -func Serve(addr string, servePprof bool) error { +func Serve(addr string, servePprof bool, enableExtraPprof bool) error { router := 
http.NewServeMux() router.Handle("/metrics", promhttp.Handler()) router.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { @@ -147,9 +147,12 @@ func Serve(addr string, servePprof bool) error { if servePprof { log.Printf("serving pprof on %v/debug/pprof/", addr) - // enable some more aggressive pprof - runtime.SetBlockProfileRate(1) - runtime.SetMutexProfileFraction(1) + if enableExtraPprof { + // enables some extra pprof endpoints + runtime.SetCPUProfileRate(1) + runtime.SetBlockProfileRate(1) + runtime.SetMutexProfileFraction(1) + } router.HandleFunc("/debug/pprof/", pprof.Index) router.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) router.HandleFunc("/debug/pprof/profile", pprof.Profile) diff --git a/backend/pkg/commons/types/config.go b/backend/pkg/commons/types/config.go index a9e7dbf59..5518cb131 100644 --- a/backend/pkg/commons/types/config.go +++ b/backend/pkg/commons/types/config.go @@ -271,9 +271,10 @@ type Config struct { MainCurrency string `yaml:"mainCurrency" envconfig:"FRONTEND_MAIN_CURRENCY"` } `yaml:"frontend"` Metrics struct { - Enabled bool `yaml:"enabled" envconfig:"METRICS_ENABLED"` - Address string `yaml:"address" envconfig:"METRICS_ADDRESS"` - Pprof bool `yaml:"pprof" envconfig:"METRICS_PPROF"` + Enabled bool `yaml:"enabled" envconfig:"METRICS_ENABLED"` + Address string `yaml:"address" envconfig:"METRICS_ADDRESS"` + Pprof bool `yaml:"pprof" envconfig:"METRICS_PPROF"` + PprofExtra bool `yaml:"pprofExtra" envconfig:"METRICS_PPROF_EXTRA"` } `yaml:"metrics"` Notifications struct { UserDBNotifications bool `yaml:"userDbNotifications" envconfig:"USERDB_NOTIFICATIONS_ENABLED"` From 161652041681b02afa609ed8620731b301d1ff58 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 2 Sep 2024 14:46:24 +0200 Subject: [PATCH 031/187] fix(notifications): properly log error --- backend/pkg/notification/notifications.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 76bedf688..ba06f4132 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -166,7 +166,7 @@ func notificationSender() { if err != nil { log.Error(err, "error getting advisory lock from db", 0) - conn.Close() + err := conn.Close() if err != nil { log.Error(err, "error returning connection to connection pool", 0) } From 41534b18049262347140f4c080658ae09802f3f4 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Mon, 2 Sep 2024 15:21:09 +0200 Subject: [PATCH 032/187] Fill groups info --- backend/pkg/api/data_access/vdb_management.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index f783f6e6f..ebc36f834 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -313,7 +313,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d eg := errgroup.Group{} var err error // Groups - if dashboardId.Validators == nil && !dashboardId.AggregateGroups { + if dashboardId.Validators == nil { // should have valid primary id eg.Go(func() error { var queryResult []struct { @@ -333,11 +333,23 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d if err := d.alloyReader.SelectContext(ctx, &queryResult, query, dashboardId.Id); err != nil { return err } + + aggregateCount := uint64(0) for _, res := range queryResult { - data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: uint64(res.Id), Name: res.Name, Count: res.Count}) + if !dashboardId.AggregateGroups { + data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: uint64(res.Id), Name: res.Name, Count: res.Count}) + } else { + aggregateCount += res.Count + } } + if dashboardId.AggregateGroups { + 
data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: t.DefaultGroupId, Name: t.DefaultGroupName, Count: aggregateCount}) + } + return nil }) + } else { + data.Groups = []t.VDBOverviewGroup{{Id: t.DefaultGroupId, Name: t.DefaultGroupName, Count: uint64(len(dashboardId.Validators))}} } // Validator status and balance From 5343feb97c4303de44198c3a77f8a458c05f2e50 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Mon, 2 Sep 2024 15:28:40 +0200 Subject: [PATCH 033/187] Avoid an unnecessary query --- backend/pkg/api/data_access/vdb_management.go | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index ebc36f834..66e7c44f4 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -313,7 +313,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d eg := errgroup.Group{} var err error // Groups - if dashboardId.Validators == nil { + if dashboardId.Validators == nil && !dashboardId.AggregateGroups { // should have valid primary id eg.Go(func() error { var queryResult []struct { @@ -334,22 +334,12 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d return err } - aggregateCount := uint64(0) for _, res := range queryResult { - if !dashboardId.AggregateGroups { - data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: uint64(res.Id), Name: res.Name, Count: res.Count}) - } else { - aggregateCount += res.Count - } - } - if dashboardId.AggregateGroups { - data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: t.DefaultGroupId, Name: t.DefaultGroupName, Count: aggregateCount}) + data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: uint64(res.Id), Name: res.Name, Count: res.Count}) } return nil }) - } else { - data.Groups = []t.VDBOverviewGroup{{Id: t.DefaultGroupId, Name: 
t.DefaultGroupName, Count: uint64(len(dashboardId.Validators))}} } // Validator status and balance @@ -364,6 +354,10 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d return fmt.Errorf("error retrieving validators from dashboard id: %v", err) } + if dashboardId.Validators != nil || dashboardId.AggregateGroups { + data.Groups = append(data.Groups, t.VDBOverviewGroup{Id: t.DefaultGroupId, Name: t.DefaultGroupName, Count: uint64(len(validators))}) + } + // Status pubKeyList := make([][]byte, 0, len(validators)) for _, validator := range validators { From db4cac699b9fae4f14edc8525b57060e06712b72 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Mon, 2 Sep 2024 15:47:15 +0200 Subject: [PATCH 034/187] leveraging cascading delete --- backend/pkg/api/data_access/archiver.go | 43 +------------------ backend/pkg/api/data_access/vdb_management.go | 43 +------------------ 2 files changed, 4 insertions(+), 82 deletions(-) diff --git a/backend/pkg/api/data_access/archiver.go b/backend/pkg/api/data_access/archiver.go index 7c987b4d0..09fd605a3 100644 --- a/backend/pkg/api/data_access/archiver.go +++ b/backend/pkg/api/data_access/archiver.go @@ -7,7 +7,6 @@ import ( "github.com/doug-martin/goqu/v9" t "github.com/gobitfly/beaconchain/pkg/api/types" - "github.com/gobitfly/beaconchain/pkg/commons/utils" ) type ArchiverRepository interface { @@ -100,47 +99,9 @@ func (d *DataAccessService) UpdateValidatorDashboardsArchiving(ctx context.Conte } func (d *DataAccessService) RemoveValidatorDashboards(ctx context.Context, dashboardIds []uint64) error { - tx, err := d.alloyWriter.BeginTxx(ctx, nil) - if err != nil { - return fmt.Errorf("error starting db transactions to remove validator dashboards: %w", err) - } - defer utils.Rollback(tx) - // Delete the dashboard - _, err = tx.ExecContext(ctx, ` + _, err := d.writerDb.ExecContext(ctx, ` DELETE FROM users_val_dashboards WHERE id = ANY($1) `, dashboardIds) - if 
err != nil { - return err - } - - // Delete all groups for the dashboard - _, err = tx.ExecContext(ctx, ` - DELETE FROM users_val_dashboards_groups WHERE dashboard_id = ANY($1) - `, dashboardIds) - if err != nil { - return err - } - - // Delete all validators for the dashboard - _, err = tx.ExecContext(ctx, ` - DELETE FROM users_val_dashboards_validators WHERE dashboard_id = ANY($1) - `, dashboardIds) - if err != nil { - return err - } - - // Delete all shared dashboards for the dashboard - _, err = tx.ExecContext(ctx, ` - DELETE FROM users_val_dashboards_sharing WHERE dashboard_id = ANY($1) - `, dashboardIds) - if err != nil { - return err - } - - err = tx.Commit() - if err != nil { - return fmt.Errorf("error committing tx to remove validator dashboards: %w", err) - } - return nil + return err } diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 5d009a350..c021c44cf 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -229,49 +229,10 @@ func (d *DataAccessService) CreateValidatorDashboard(ctx context.Context, userId } func (d *DataAccessService) RemoveValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary) error { - tx, err := d.alloyWriter.BeginTxx(ctx, nil) - if err != nil { - return fmt.Errorf("error starting db transactions to remove a validator dashboard: %w", err) - } - defer utils.Rollback(tx) - - // Delete the dashboard - _, err = tx.ExecContext(ctx, ` + _, err := d.alloyWriter.ExecContext(ctx, ` DELETE FROM users_val_dashboards WHERE id = $1 `, dashboardId) - if err != nil { - return err - } - - // Delete all groups for the dashboard - _, err = tx.ExecContext(ctx, ` - DELETE FROM users_val_dashboards_groups WHERE dashboard_id = $1 - `, dashboardId) - if err != nil { - return err - } - - // Delete all validators for the dashboard - _, err = tx.ExecContext(ctx, ` - DELETE FROM users_val_dashboards_validators WHERE 
dashboard_id = $1 - `, dashboardId) - if err != nil { - return err - } - - // Delete all shared dashboards for the dashboard - _, err = tx.ExecContext(ctx, ` - DELETE FROM users_val_dashboards_sharing WHERE dashboard_id = $1 - `, dashboardId) - if err != nil { - return err - } - - err = tx.Commit() - if err != nil { - return fmt.Errorf("error committing tx to remove a validator dashboard: %w", err) - } - return nil + return err } func (d *DataAccessService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archivedReason *enums.VDBArchivedReason) (*t.VDBPostArchivingReturnData, error) { From 839112892fbcfa4db25174b6bba2d9b85608edf5 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Mon, 2 Sep 2024 16:15:14 +0200 Subject: [PATCH 035/187] Use cascade delete to simplify group removal --- backend/pkg/api/data_access/vdb_management.go | 26 ++----------------- 1 file changed, 2 insertions(+), 24 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index c021c44cf..31b15c5fc 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -543,33 +543,11 @@ func (d *DataAccessService) UpdateValidatorDashboardGroup(ctx context.Context, d } func (d *DataAccessService) RemoveValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64) error { - tx, err := d.alloyWriter.BeginTxx(ctx, nil) - if err != nil { - return fmt.Errorf("error starting db transactions to remove a validator dashboard group: %w", err) - } - defer utils.Rollback(tx) - // Delete the group - _, err = tx.ExecContext(ctx, ` + _, err := d.alloyWriter.ExecContext(ctx, ` DELETE FROM users_val_dashboards_groups WHERE dashboard_id = $1 AND id = $2 `, dashboardId, groupId) - if err != nil { - return err - } - - // Delete all validators for the group - _, err = tx.ExecContext(ctx, ` - DELETE FROM 
users_val_dashboards_validators WHERE dashboard_id = $1 AND group_id = $2 - `, dashboardId, groupId) - if err != nil { - return err - } - - err = tx.Commit() - if err != nil { - return fmt.Errorf("error committing tx to remove a validator dashboard group: %w", err) - } - return nil + return err } func (d *DataAccessService) GetValidatorDashboardGroupCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { From 0f08e90759c72b105727ac06d4962c833e0efe46 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Mon, 2 Sep 2024 17:45:36 +0200 Subject: [PATCH 036/187] Adjusted latest proposal check --- backend/pkg/api/data_access/vdb_slotviz.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_slotviz.go b/backend/pkg/api/data_access/vdb_slotviz.go index 0e77f7034..875b03214 100644 --- a/backend/pkg/api/data_access/vdb_slotviz.go +++ b/backend/pkg/api/data_access/vdb_slotviz.go @@ -19,7 +19,6 @@ func (d *DataAccessService) GetValidatorDashboardSlotViz(ctx context.Context, da // Get min/max slot/epoch headEpoch := cache.LatestEpoch.Get() // Reminder: Currently it is possible to get the head epoch from the cache but nothing sets it in v2 - latestProposedSlot := cache.LatestProposedSlot.Get() slotsPerEpoch := utils.Config.Chain.ClConfig.SlotsPerEpoch minEpoch := uint64(0) @@ -35,6 +34,23 @@ func (d *DataAccessService) GetValidatorDashboardSlotViz(ctx context.Context, da return nil, err } + latestProposedSlot := int64(-1) + for slot := dutiesInfo.LatestSlot; ; slot-- { + if _, ok := dutiesInfo.PropAssignmentsForSlot[slot]; ok { + if dutiesInfo.SlotStatus[slot] == 1 { + latestProposedSlot = int64(slot) + break + } + } else { + // No more data available + break + } + + if slot == 0 { + break + } + } + epochToIndexMap := make(map[uint64]uint64) slotToIndexMap := make(map[uint64]uint64) @@ -205,7 +221,7 @@ func (d *DataAccessService) 
GetValidatorDashboardSlotViz(ctx context.Context, da } attestationsRef := slotVizEpochs[epochIdx].Slots[slotIdx].Attestations - if uint64(slot) >= latestProposedSlot { + if latestProposedSlot == -1 || uint64(slot) >= uint64(latestProposedSlot) { if attestationsRef.Scheduled == nil { attestationsRef.Scheduled = &t.VDBSlotVizDuty{} } From a854f0180e3c05e83a8f40a8b3b000585928d779 Mon Sep 17 00:00:00 2001 From: LUCCA DUKIC <109136188+LuccaBitfly@users.noreply.github.com> Date: Wed, 28 Aug 2024 14:30:34 +0200 Subject: [PATCH 037/187] (BEDS-143) add validator dashboard endpoints to public api --- backend/pkg/api/handlers/common.go | 2 +- backend/pkg/api/handlers/internal.go | 1308 ++---------------- backend/pkg/api/handlers/public.go | 1121 ++++++++++++++- backend/pkg/api/router.go | 10 +- backend/pkg/api/types/slot_viz.go | 2 +- backend/pkg/api/types/validator_dashboard.go | 46 +- frontend/types/api/slot_viz.ts | 2 +- frontend/types/api/validator_dashboard.ts | 46 +- 8 files changed, 1204 insertions(+), 1333 deletions(-) diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index b80d317eb..c3de88400 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -592,7 +592,7 @@ func (v *validationError) checkProtocolModes(protocolModes string) types.VDBProt func (v *validationError) checkValidatorList(validators string, allowEmpty bool) ([]types.VDBValidator, []string) { if validators == "" && !allowEmpty { - v.add("validators", "list of validators is must not be empty") + v.add("validators", "list of validators must not be empty") return nil, nil } validatorsSlice := splitParameters(validators, ',') diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index e5583477c..1c4157290 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -2,10 +2,8 @@ package handlers import ( "errors" - "fmt" "math" "net/http" - "reflect" 
"github.com/gobitfly/beaconchain/pkg/api/enums" types "github.com/gobitfly/beaconchain/pkg/api/types" @@ -254,20 +252,7 @@ func (h *HandlerService) InternalGetUserInfo(w http.ResponseWriter, r *http.Requ // Dashboards func (h *HandlerService) InternalGetUserDashboards(w http.ResponseWriter, r *http.Request) { - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetUserDashboards(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.ApiDataResponse[types.UserDashboardsData]{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserDashboards(w, r) } // -------------------------------------- @@ -333,1274 +318,147 @@ func (h *HandlerService) InternalPutAccountDashboardTransactionsSettings(w http. // Validator Dashboards func (h *HandlerService) InternalPostValidatorDashboards(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - req := struct { - Name string `json:"name"` - Network intOrString `json:"network"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - name := v.checkNameNotEmpty(req.Name) - chainId := v.checkNetwork(req.Network) - if v.hasErrors() { - handleErr(w, r, v) - return - } + h.PublicPostValidatorDashboards(w, r) +} - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, true) - if err != nil { - handleErr(w, r, err) - return - } - if dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards && !isUserAdmin(userInfo) { - returnConflict(w, r, errors.New("maximum number of validator dashboards reached")) - return - } +func (h *HandlerService) InternalGetValidatorDashboard(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboard(w, r) +} - data, err 
:= h.dai.CreateValidatorDashboard(r.Context(), userId, name, chainId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.ApiDataResponse[types.VDBPostReturnData]{ - Data: *data, - } - returnCreated(w, r, response) +func (h *HandlerService) InternalDeleteValidatorDashboard(w http.ResponseWriter, r *http.Request) { + h.PublicDeleteValidatorDashboard(w, r) } -func (h *HandlerService) InternalGetValidatorDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardIdParam := mux.Vars(r)["dashboard_id"] - dashboardId, err := h.handleDashboardId(r.Context(), dashboardIdParam) - if err != nil { - handleErr(w, r, err) - return - } +func (h *HandlerService) InternalPutValidatorDashboardName(w http.ResponseWriter, r *http.Request) { + h.PublicPutValidatorDashboardName(w, r) +} - q := r.URL.Query() - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } +func (h *HandlerService) InternalPostValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { + h.PublicPostValidatorDashboardGroups(w, r) +} - // set name depending on dashboard id - var name string - if reInteger.MatchString(dashboardIdParam) { - name, err = h.dai.GetValidatorDashboardName(r.Context(), dashboardId.Id) - } else if reValidatorDashboardPublicId.MatchString(dashboardIdParam) { - var publicIdInfo *types.VDBPublicId - publicIdInfo, err = h.dai.GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) - name = publicIdInfo.Name - } - if err != nil { - handleErr(w, r, err) - return - } +func (h *HandlerService) InternalPutValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { + h.PublicPutValidatorDashboardGroups(w, r) +} - // add premium chart perk info for shared dashboards - premiumPerks, err := h.getDashboardPremiumPerks(r.Context(), *dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetValidatorDashboardOverview(r.Context(), 
*dashboardId, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - data.ChartHistorySeconds = premiumPerks.ChartHistorySeconds - data.Name = name +func (h *HandlerService) InternalDeleteValidatorDashboardGroup(w http.ResponseWriter, r *http.Request) { + h.PublicDeleteValidatorDashboardGroup(w, r) +} - response := types.InternalGetValidatorDashboardResponse{ - Data: *data, - } +func (h *HandlerService) InternalPostValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { + h.PublicPostValidatorDashboardValidators(w, r) +} - returnOk(w, r, response) +func (h *HandlerService) InternalGetValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardValidators(w, r) } -func (h *HandlerService) InternalDeleteValidatorDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.RemoveValidatorDashboard(r.Context(), dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - returnNoContent(w, r) +func (h *HandlerService) InternalDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { + h.PublicDeleteValidatorDashboardValidators(w, r) +} + +func (h *HandlerService) InternalPostValidatorDashboardPublicIds(w http.ResponseWriter, r *http.Request) { + h.PublicPostValidatorDashboardPublicIds(w, r) +} + +func (h *HandlerService) InternalPutValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { + h.PublicPutValidatorDashboardPublicId(w, r) +} + +func (h *HandlerService) InternalDeleteValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { + h.PublicDeleteValidatorDashboardPublicId(w, r) } func (h *HandlerService) InternalPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := 
struct { - IsArchived bool `json:"is_archived"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - if v.hasErrors() { - handleErr(w, r, v) - return - } + h.PublicPutValidatorDashboardArchiving(w, r) +} - // check conditions for changing archival status - dashboardInfo, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - if dashboardInfo.IsArchived == req.IsArchived { - // nothing to do - returnOk(w, r, types.ApiDataResponse[types.VDBPostArchivingReturnData]{ - Data: types.VDBPostArchivingReturnData{Id: uint64(dashboardId), IsArchived: req.IsArchived}, - }) - return - } +func (h *HandlerService) InternalGetValidatorDashboardSlotViz(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardSlotViz(w, r) +} - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) - if err != nil { - handleErr(w, r, err) - return - } +func (h *HandlerService) InternalGetValidatorDashboardSummary(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardSummary(w, r) +} - userInfo, err := h.dai.GetUserInfo(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - if !isUserAdmin(userInfo) { - if req.IsArchived { - if dashboardCount >= maxArchivedDashboardsCount { - returnConflict(w, r, errors.New("maximum number of archived validator dashboards reached")) - return - } - } else { - if dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards { - returnConflict(w, r, errors.New("maximum number of active validator dashboards reached")) - return - } - if dashboardInfo.GroupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { - returnConflict(w, r, errors.New("maximum number of groups in dashboards reached")) - return - } - if dashboardInfo.ValidatorCount >= 
userInfo.PremiumPerks.ValidatorsPerDashboard { - returnConflict(w, r, errors.New("maximum number of validators in dashboards reached")) - return - } - } - } +func (h *HandlerService) InternalGetValidatorDashboardGroupSummary(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardGroupSummary(w, r) +} - data, err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, req.IsArchived) - if err != nil { - handleErr(w, r, err) - return - } - response := types.ApiDataResponse[types.VDBPostArchivingReturnData]{ - Data: *data, - } - returnOk(w, r, response) +func (h *HandlerService) InternalGetValidatorDashboardSummaryChart(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardSummaryChart(w, r) } -func (h *HandlerService) InternalPutValidatorDashboardName(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { - Name string `json:"name"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - name := v.checkNameNotEmpty(req.Name) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, err := h.dai.UpdateValidatorDashboardName(r.Context(), dashboardId, name) - if err != nil { - handleErr(w, r, err) - return - } - response := types.ApiDataResponse[types.VDBPostReturnData]{ - Data: *data, - } - returnOk(w, r, response) +func (h *HandlerService) InternalGetValidatorDashboardSummaryValidators(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardSummaryValidators(w, r) } -func (h *HandlerService) InternalPostValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { - Name string `json:"name"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - name := v.checkNameNotEmpty(req.Name) - if v.hasErrors() { - 
handleErr(w, r, v) - return - } - ctx := r.Context() - // check if user has reached the maximum number of groups - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - userInfo, err := h.dai.GetUserInfo(ctx, userId) - if err != nil { - handleErr(w, r, err) - return - } - groupCount, err := h.dai.GetValidatorDashboardGroupCount(ctx, dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - if groupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard && !isUserAdmin(userInfo) { - returnConflict(w, r, errors.New("maximum number of validator dashboard groups reached")) - return - } +func (h *HandlerService) InternalGetValidatorDashboardRewards(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardRewards(w, r) +} - data, err := h.dai.CreateValidatorDashboardGroup(ctx, dashboardId, name) - if err != nil { - handleErr(w, r, err) - return - } +func (h *HandlerService) InternalGetValidatorDashboardGroupRewards(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardGroupRewards(w, r) +} - response := types.ApiResponse{ - Data: data, - } +func (h *HandlerService) InternalGetValidatorDashboardRewardsChart(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardRewardsChart(w, r) +} - returnCreated(w, r, response) +func (h *HandlerService) InternalGetValidatorDashboardDuties(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardDuties(w, r) } -func (h *HandlerService) InternalPutValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) - groupId := v.checkExistingGroupId(vars["group_id"]) - req := struct { - Name string `json:"name"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - name := v.checkNameNotEmpty(req.Name) - if v.hasErrors() { - handleErr(w, r, v) - return - } - groupExists, 
err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) - if err != nil { - handleErr(w, r, err) - return - } - if !groupExists { - returnNotFound(w, r, errors.New("group not found")) - return - } - data, err := h.dai.UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) - if err != nil { - handleErr(w, r, err) - return - } +func (h *HandlerService) InternalGetValidatorDashboardBlocks(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardBlocks(w, r) +} - response := types.ApiResponse{ - Data: data, - } +func (h *HandlerService) InternalGetValidatorDashboardHeatmap(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardHeatmap(w, r) +} - returnOk(w, r, response) -} - -func (h *HandlerService) InternalDeleteValidatorDashboardGroup(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - groupId := v.checkExistingGroupId(vars["group_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - if groupId == types.DefaultGroupId { - returnBadRequest(w, r, errors.New("cannot delete default group")) - return - } - groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) - if err != nil { - handleErr(w, r, err) - return - } - if !groupExists { - returnNotFound(w, r, errors.New("group not found")) - return - } - err = h.dai.RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) - if err != nil { - handleErr(w, r, err) - return - } - - returnNoContent(w, r) -} - -func (h *HandlerService) InternalPostValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { - GroupId uint64 `json:"group_id,omitempty"` - Validators []intOrString `json:"validators,omitempty"` - DepositAddress string `json:"deposit_address,omitempty"` - 
WithdrawalAddress string `json:"withdrawal_address,omitempty"` - Graffiti string `json:"graffiti,omitempty"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - if v.hasErrors() { - handleErr(w, r, v) - return - } - // check if exactly one of validators, deposit_address, withdrawal_address, graffiti is set - fields := []interface{}{req.Validators, req.DepositAddress, req.WithdrawalAddress, req.Graffiti} - var count int - for _, set := range fields { - if !reflect.ValueOf(set).IsZero() { - count++ - } - } - if count != 1 { - v.add("request body", "exactly one of `validators`, `deposit_address`, `withdrawal_address`, `graffiti` must be set. please check the API documentation for more information") - } - if v.hasErrors() { - handleErr(w, r, v) - return - } - - groupId := req.GroupId - ctx := r.Context() - groupExists, err := h.dai.GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) - if err != nil { - handleErr(w, r, err) - return - } - if !groupExists { - returnNotFound(w, r, errors.New("group not found")) - return - } - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - userInfo, err := h.dai.GetUserInfo(ctx, userId) - if err != nil { - handleErr(w, r, err) - return - } - limit := userInfo.PremiumPerks.ValidatorsPerDashboard - if req.Validators == nil && !userInfo.PremiumPerks.BulkAdding && !isUserAdmin(userInfo) { - returnConflict(w, r, errors.New("bulk adding not allowed with current subscription plan")) - return - } - var data []types.VDBPostValidatorsData - var dataErr error - switch { - case req.Validators != nil: - indices, pubkeys := v.checkValidators(req.Validators, forbidEmpty) - if v.hasErrors() { - handleErr(w, r, v) - return - } - validators, err := h.dai.GetValidatorsFromSlices(indices, pubkeys) - if err != nil { - handleErr(w, r, err) - return - } - // check if adding more validators than allowed - existingValidatorCount, err := 
h.dai.GetValidatorDashboardExistingValidatorCount(ctx, dashboardId, validators) - if err != nil { - handleErr(w, r, err) - return - } - if uint64(len(validators)) > existingValidatorCount+limit { - returnConflict(w, r, fmt.Errorf("adding more validators than allowed, limit is %v new validators", limit)) - return - } - data, dataErr = h.dai.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validators) - - case req.DepositAddress != "": - depositAddress := v.checkRegex(reEthereumAddress, req.DepositAddress, "deposit_address") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByDepositAddress(ctx, dashboardId, groupId, depositAddress, limit) - - case req.WithdrawalAddress != "": - withdrawalAddress := v.checkRegex(reWithdrawalCredential, req.WithdrawalAddress, "withdrawal_address") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByWithdrawalAddress(ctx, dashboardId, groupId, withdrawalAddress, limit) - - case req.Graffiti != "": - graffiti := v.checkRegex(reNonEmpty, req.Graffiti, "graffiti") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, dataErr = h.dai.AddValidatorDashboardValidatorsByGraffiti(ctx, dashboardId, groupId, graffiti, limit) - } - - if dataErr != nil { - handleErr(w, r, dataErr) - return - } - response := types.ApiResponse{ - Data: data, - } - - returnCreated(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - groupId := v.checkGroupId(q.Get("group_id"), allowEmpty) - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBManageValidatorsColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := 
h.dai.GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardValidatorsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - var indices []uint64 - var publicKeys []string - if validatorsParam := r.URL.Query().Get("validators"); validatorsParam != "" { - indices, publicKeys = v.checkValidatorList(validatorsParam, allowEmpty) - if v.hasErrors() { - handleErr(w, r, v) - return - } - } - validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) - if err != nil { - handleErr(w, r, err) - return - } - err = h.dai.RemoveValidatorDashboardValidators(r.Context(), dashboardId, validators) - if err != nil { - handleErr(w, r, err) - return - } - - returnNoContent(w, r) -} - -func (h *HandlerService) InternalPostValidatorDashboardPublicIds(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { - Name string `json:"name,omitempty"` - ShareSettings struct { - ShareGroups bool `json:"share_groups"` - } `json:"share_settings"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - name := v.checkName(req.Name, 0) - if v.hasErrors() { - handleErr(w, r, v) - return - } - publicIdCount, err := h.dai.GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - if publicIdCount >= 1 { - returnConflict(w, r, errors.New("cannot create more than one public id")) - return - } - - data, err := h.dai.CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, 
req.ShareSettings.ShareGroups) - if err != nil { - handleErr(w, r, err) - return - } - response := types.ApiResponse{ - Data: data, - } - - returnCreated(w, r, response) -} - -func (h *HandlerService) InternalPutValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { - Name string `json:"name,omitempty"` - ShareSettings struct { - ShareGroups bool `json:"share_groups"` - } `json:"share_settings"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - name := v.checkName(req.Name, 0) - publicDashboardId := v.checkValidatorDashboardPublicId(vars["public_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) - if err != nil { - handleErr(w, r, err) - return - } - if *fetchedId != dashboardId { - handleErr(w, r, newNotFoundErr("public id %v not found", publicDashboardId)) - return - } - - data, err := h.dai.UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) - if err != nil { - handleErr(w, r, err) - return - } - response := types.ApiResponse{ - Data: data, - } - - returnOk(w, r, response) -} - -func (h *HandlerService) InternalDeleteValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) - publicDashboardId := v.checkValidatorDashboardPublicId(vars["public_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) - if err != nil { - handleErr(w, r, err) - return - } - if *fetchedId != dashboardId { - handleErr(w, r, newNotFoundErr("public id %v not found", publicDashboardId)) - return - } - - err = 
h.dai.RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) - if err != nil { - handleErr(w, r, err) - return - } - - returnNoContent(w, r) -} - -func (h *HandlerService) InternalGetValidatorDashboardSlotViz(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - - groupIds := v.checkExistingGroupIdList(r.URL.Query().Get("group_ids")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, err := h.dai.GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardSlotVizResponse{ - Data: data, - } - - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardSummary(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBSummaryColumn](&v, q.Get("sort")) - protocolModes := v.checkProtocolModes(q.Get("modes")) - - period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardSummaryResponse{ - Data: data, - 
Paging: *paging, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardGroupSummary(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - q := r.URL.Query() - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - if err != nil { - handleErr(w, r, err) - return - } - groupId := v.checkGroupId(vars["group_id"], forbidEmpty) - period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := h.dai.GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardGroupSummaryResponse{ - Data: *data, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardSummaryChart(w http.ResponseWriter, r *http.Request) { - var v validationError - ctx := r.Context() - dashboardId, err := h.handleDashboardId(ctx, mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - groupIds := v.checkGroupIdList(q.Get("group_ids")) - efficiencyType := checkEnum[enums.VDBSummaryChartEfficiencyType](&v, q.Get("efficiency_type"), "efficiency_type") - - aggregation := checkEnum[enums.ChartAggregation](&v, r.URL.Query().Get("aggregation"), "aggregation") - chartLimits, err := h.getCurrentChartTimeLimitsForDashboard(ctx, dashboardId, aggregation) - if err != nil { - handleErr(w, r, err) - return - } - afterTs, 
beforeTs := v.checkTimestamps(r, chartLimits) - if v.hasErrors() { - handleErr(w, r, v) - return - } - if afterTs < chartLimits.MinAllowedTs || beforeTs < chartLimits.MinAllowedTs { - returnConflict(w, r, fmt.Errorf("requested time range is too old, minimum timestamp for dashboard owner's premium subscription for this aggregation is %v", chartLimits.MinAllowedTs)) - return - } - - data, err := h.dai.GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardSummaryChartResponse{ - Data: *data, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardSummaryValidators(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - groupId := v.checkGroupId(r.URL.Query().Get("group_id"), allowEmpty) - q := r.URL.Query() - duty := checkEnum[enums.ValidatorDuty](&v, q.Get("duty"), "duty") - period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") - // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") - if v.hasErrors() { - handleErr(w, r, v) - return - } - - // get indices based on duty - var indices interface{} - duties := enums.ValidatorDuties - switch duty { - case duties.None: - indices, err = h.dai.GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) - case duties.Sync: - indices, err = h.dai.GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) - case duties.Slashed: - indices, err = 
h.dai.GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) - case duties.Proposal: - indices, err = h.dai.GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) - } - if err != nil { - handleErr(w, r, err) - return - } - // map indices to response format - data, err := mapVDBIndices(indices) - if err != nil { - handleErr(w, r, err) - return - } - - response := types.InternalGetValidatorDashboardSummaryValidatorsResponse{ - Data: data, - } - - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardRewards(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBRewardsColumn](&v, q.Get("sort")) - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardRewardsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardGroupRewards(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - groupId := v.checkGroupId(vars["group_id"], forbidEmpty) - epoch := v.checkUint(vars["epoch"], "epoch") - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := 
h.dai.GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardGroupRewardsResponse{ - Data: *data, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardRewardsChart(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := h.dai.GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardRewardsChartResponse{ - Data: *data, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardDuties(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - groupId := v.checkGroupId(q.Get("group_id"), allowEmpty) - epoch := v.checkUint(vars["epoch"], "epoch") - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBDutiesColumn](&v, q.Get("sort")) - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardDutiesResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) -} - -func (h *HandlerService) 
InternalGetValidatorDashboardBlocks(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBBlocksColumn](&v, q.Get("sort")) - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardBlocksResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardHeatmap(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - protocolModes := v.checkProtocolModes(q.Get("modes")) - aggregation := checkEnum[enums.ChartAggregation](&v, r.URL.Query().Get("aggregation"), "aggregation") - chartLimits, err := h.getCurrentChartTimeLimitsForDashboard(r.Context(), dashboardId, aggregation) - if err != nil { - handleErr(w, r, err) - return - } - afterTs, beforeTs := v.checkTimestamps(r, chartLimits) - if v.hasErrors() { - handleErr(w, r, v) - return - } - if afterTs < chartLimits.MinAllowedTs || beforeTs < chartLimits.MinAllowedTs { - returnConflict(w, r, fmt.Errorf("requested time range is too old, minimum timestamp for dashboard owner's premium subscription for this aggregation is %v", chartLimits.MinAllowedTs)) - return - } - - data, err := h.dai.GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) - if err != nil { - handleErr(w, r, err) - 
return - } - response := types.InternalGetValidatorDashboardHeatmapResponse{ - Data: *data, - } - returnOk(w, r, response) -} - -func (h *HandlerService) InternalGetValidatorDashboardGroupHeatmap(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - groupId := v.checkExistingGroupId(vars["group_id"]) - requestedTimestamp := v.checkUint(vars["timestamp"], "timestamp") - protocolModes := v.checkProtocolModes(r.URL.Query().Get("modes")) - aggregation := checkEnum[enums.ChartAggregation](&v, r.URL.Query().Get("aggregation"), "aggregation") - if v.hasErrors() { - handleErr(w, r, v) - return - } - chartLimits, err := h.getCurrentChartTimeLimitsForDashboard(r.Context(), dashboardId, aggregation) - if err != nil { - handleErr(w, r, err) - return - } - if requestedTimestamp < chartLimits.MinAllowedTs || requestedTimestamp > chartLimits.LatestExportedTs { - handleErr(w, r, newConflictErr("requested timestamp is outside of allowed chart history for dashboard owner's premium subscription")) - return - } - - data, err := h.dai.GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardGroupHeatmapResponse{ - Data: *data, - } - returnOk(w, r, response) +func (h *HandlerService) InternalGetValidatorDashboardGroupHeatmap(w http.ResponseWriter, r *http.Request) { + h.PublicGetValidatorDashboardGroupHeatmap(w, r) } func (h *HandlerService) InternalGetValidatorDashboardExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - pagingParams := v.checkPagingParams(r.URL.Query()) - if 
v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardExecutionLayerDepositsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardExecutionLayerDeposits(w, r) } func (h *HandlerService) InternalGetValidatorDashboardConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { - var v validationError - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - pagingParams := v.checkPagingParams(r.URL.Query()) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - - response := types.InternalGetValidatorDashboardConsensusLayerDepositsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardConsensusLayerDeposits(w, r) } func (h *HandlerService) InternalGetValidatorDashboardTotalConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { - var err error - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - - response := types.InternalGetValidatorDashboardTotalConsensusDepositsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardTotalConsensusLayerDeposits(w, r) } func (h *HandlerService) InternalGetValidatorDashboardTotalExecutionLayerDeposits(w http.ResponseWriter, r 
*http.Request) { - var err error - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) - if err != nil { - handleErr(w, r, err) - return - } - - response := types.InternalGetValidatorDashboardTotalExecutionDepositsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardTotalExecutionLayerDeposits(w, r) } func (h *HandlerService) InternalGetValidatorDashboardWithdrawals(w http.ResponseWriter, r *http.Request) { - var v validationError - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBWithdrawalsColumn](&v, q.Get("sort")) - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardWithdrawalsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardWithdrawals(w, r) } func (h *HandlerService) InternalGetValidatorDashboardTotalWithdrawals(w http.ResponseWriter, r *http.Request) { - var v validationError - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - pagingParams := v.checkPagingParams(q) - protocolModes := v.checkProtocolModes(q.Get("modes")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := h.dai.GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, 
protocolModes) - if err != nil { - handleErr(w, r, err) - return - } - - response := types.InternalGetValidatorDashboardTotalWithdrawalsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardTotalWithdrawals(w, r) } func (h *HandlerService) InternalGetValidatorDashboardRocketPool(w http.ResponseWriter, r *http.Request) { - var v validationError - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBRocketPoolColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardRocketPoolResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardRocketPool(w, r) } func (h *HandlerService) InternalGetValidatorDashboardTotalRocketPool(w http.ResponseWriter, r *http.Request) { - var v validationError - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - pagingParams := v.checkPagingParams(q) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := h.dai.GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardTotalRocketPoolResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardTotalRocketPool(w, r) } func (h *HandlerService) InternalGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - dashboardId, err 
:= h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - // support ENS names ? - nodeAddress := v.checkAddress(vars["node_address"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, err := h.dai.GetValidatorDashboardNodeRocketPool(r.Context(), *dashboardId, nodeAddress) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardNodeRocketPoolResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardNodeRocketPool(w, r) } func (h *HandlerService) InternalGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) { - var v validationError - vars := mux.Vars(r) - q := r.URL.Query() - dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) - if err != nil { - handleErr(w, r, err) - return - } - // support ENS names ? - nodeAddress := v.checkAddress(vars["node_address"]) - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.VDBRocketPoolMinipoolsColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - - data, paging, err := h.dai.GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetValidatorDashboardRocketPoolMinipoolsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetValidatorDashboardRocketPoolMinipools(w, r) } // -------------------------------------- diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 2251677a4..28d17bb39 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -8,6 +8,7 @@ import ( "reflect" "time" + "github.com/gobitfly/beaconchain/pkg/api/enums" "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gorilla/mux" ) @@ -112,43 
+113,281 @@ func (h *HandlerService) PublicPutAccountDashboardTransactionsSettings(w http.Re } func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r *http.Request) { - returnCreated(w, r, nil) + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + + type request struct { + Name string `json:"name"` + Network intOrString `json:"network" swaggertype:"string" enums:"ethereum,gnosis"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + name := v.checkNameNotEmpty(req.Name) + chainId := v.checkNetwork(req.Network) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, true) + if err != nil { + handleErr(w, r, err) + return + } + if dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards && !isUserAdmin(userInfo) { + returnConflict(w, r, errors.New("maximum number of validator dashboards reached")) + return + } + + data, err := h.dai.CreateValidatorDashboard(r.Context(), userId, name, chainId) + if err != nil { + handleErr(w, r, err) + return + } + response := types.ApiDataResponse[types.VDBPostReturnData]{ + Data: *data, + } + returnCreated(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardIdParam := mux.Vars(r)["dashboard_id"] + dashboardId, err := h.handleDashboardId(r.Context(), dashboardIdParam) + if err != nil { + handleErr(w, r, err) + return + } + + q := r.URL.Query() + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + // set name depending on dashboard id + var name string + if reInteger.MatchString(dashboardIdParam) { + name, err = 
h.dai.GetValidatorDashboardName(r.Context(), dashboardId.Id) + } else if reValidatorDashboardPublicId.MatchString(dashboardIdParam) { + var publicIdInfo *types.VDBPublicId + publicIdInfo, err = h.dai.GetValidatorDashboardPublicId(r.Context(), types.VDBIdPublic(dashboardIdParam)) + name = publicIdInfo.Name + } + if err != nil { + handleErr(w, r, err) + return + } + + // add premium chart perk info for shared dashboards + premiumPerks, err := h.getDashboardPremiumPerks(r.Context(), *dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + data, err := h.dai.GetValidatorDashboardOverview(r.Context(), *dashboardId, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + data.ChartHistorySeconds = premiumPerks.ChartHistorySeconds + data.Name = name + + response := types.GetValidatorDashboardResponse{ + Data: *data, + } + + returnOk(w, r, response) } func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + err := h.dai.RemoveValidatorDashboard(r.Context(), dashboardId) + if err != nil { + handleErr(w, r, err) + return + } returnNoContent(w, r) } -func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { - returnNoContent(w, r) +func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + type request struct { + Name string `json:"name"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + name := v.checkNameNotEmpty(req.Name) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.dai.UpdateValidatorDashboardName(r.Context(), dashboardId, name) + if err != nil { + handleErr(w, r, err) + return + } + 
response := types.ApiDataResponse[types.VDBPostReturnData]{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { - returnCreated(w, r, nil) + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + type request struct { + Name string `json:"name"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + name := v.checkNameNotEmpty(req.Name) + if v.hasErrors() { + handleErr(w, r, v) + return + } + ctx := r.Context() + // check if user has reached the maximum number of groups + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + userInfo, err := h.dai.GetUserInfo(ctx, userId) + if err != nil { + handleErr(w, r, err) + return + } + groupCount, err := h.dai.GetValidatorDashboardGroupCount(ctx, dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + if groupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard && !isUserAdmin(userInfo) { + returnConflict(w, r, errors.New("maximum number of validator dashboard groups reached")) + return + } + + data, err := h.dai.CreateValidatorDashboardGroup(ctx, dashboardId, name) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.ApiDataResponse[types.VDBPostCreateGroupData]{ + Data: *data, + } + + returnCreated(w, r, response) } func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { - returnCreated(w, r, nil) + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) + groupId := v.checkExistingGroupId(vars["group_id"]) + type request struct { + Name string `json:"name"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + name := v.checkNameNotEmpty(req.Name) + if v.hasErrors() { + handleErr(w, r, v) + return + } + 
groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + if err != nil { + handleErr(w, r, err) + return + } + if !groupExists { + returnNotFound(w, r, errors.New("group not found")) + return + } + data, err := h.dai.UpdateValidatorDashboardGroup(r.Context(), dashboardId, groupId, name) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.ApiDataResponse[types.VDBPostCreateGroupData]{ + Data: *data, + } + + returnOk(w, r, response) } func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + groupId := v.checkExistingGroupId(vars["group_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + if groupId == types.DefaultGroupId { + returnBadRequest(w, r, errors.New("cannot delete default group")) + return + } + groupExists, err := h.dai.GetValidatorDashboardGroupExists(r.Context(), dashboardId, groupId) + if err != nil { + handleErr(w, r, err) + return + } + if !groupExists { + returnNotFound(w, r, errors.New("group not found")) + return + } + err = h.dai.RemoveValidatorDashboardGroup(r.Context(), dashboardId, groupId) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) } func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { - GroupId uint64 `json:"group_id,omitempty"` + type request struct { + GroupId uint64 `json:"group_id,omitempty" x-nullable:"true"` Validators []intOrString `json:"validators,omitempty"` DepositAddress string `json:"deposit_address,omitempty"` WithdrawalAddress string `json:"withdrawal_address,omitempty"` Graffiti string `json:"graffiti,omitempty"` - }{} + } + var req request if err := v.checkBody(&req, r); err != nil { handleErr(w, 
r, err) return @@ -173,8 +412,8 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW return } - ctx := r.Context() groupId := req.GroupId + ctx := r.Context() groupExists, err := h.dai.GetValidatorDashboardGroupExists(ctx, dashboardId, groupId) if err != nil { handleErr(w, r, err) @@ -254,7 +493,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW handleErr(w, r, dataErr) return } - response := types.ApiResponse{ + response := types.ApiDataResponse[[]types.VDBPostValidatorsData]{ Data: data, } @@ -262,20 +501,39 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW } func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + groupId := v.checkGroupId(q.Get("group_id"), allowEmpty) + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBManageValidatorsColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetValidatorDashboardValidators(r.Context(), *dashboardId, groupId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardValidatorsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - var indices []uint64 - var publicKeys []string - if validatorsParam := r.URL.Query().Get("validators"); validatorsParam != "" { - indices, publicKeys = v.checkValidatorList(validatorsParam, allowEmpty) - if v.hasErrors() { - handleErr(w, r, v) - 
return - } + indices, publicKeys := v.checkValidatorList(r.URL.Query().Get("validators"), forbidEmpty) + if v.hasErrors() { + handleErr(w, r, v) + return } validators, err := h.dai.GetValidatorsFromSlices(indices, publicKeys) if err != nil { @@ -292,87 +550,842 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons } func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWriter, r *http.Request) { - returnCreated(w, r, nil) -} - -func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) -} - -func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { - returnNoContent(w, r) -} - -func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) -} + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + type request struct { + Name string `json:"name,omitempty"` + ShareSettings struct { + ShareGroups bool `json:"share_groups"` + } `json:"share_settings"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + name := v.checkName(req.Name, 0) + if v.hasErrors() { + handleErr(w, r, v) + return + } + publicIdCount, err := h.dai.GetValidatorDashboardPublicIdCount(r.Context(), dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + if publicIdCount >= 1 { + returnConflict(w, r, errors.New("cannot create more than one public id")) + return + } -func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) -} + data, err := h.dai.CreateValidatorDashboardPublicId(r.Context(), dashboardId, name, req.ShareSettings.ShareGroups) + if err != nil { + handleErr(w, r, err) + return + } + response := types.ApiResponse{ + Data: data, + } -func (h *HandlerService) 
PublicGetValidatorDashboardGroupSummary(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + returnCreated(w, r, response) +} + +func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + type request struct { + Name string `json:"name,omitempty"` + ShareSettings struct { + ShareGroups bool `json:"share_groups"` + } `json:"share_settings"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + name := v.checkName(req.Name, 0) + publicDashboardId := v.checkValidatorDashboardPublicId(vars["public_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + if err != nil { + handleErr(w, r, err) + return + } + if *fetchedId != dashboardId { + handleErr(w, r, newNotFoundErr("public id %v not found", publicDashboardId)) + return + } + + data, err := h.dai.UpdateValidatorDashboardPublicId(r.Context(), publicDashboardId, name, req.ShareSettings.ShareGroups) + if err != nil { + handleErr(w, r, err) + return + } + response := types.ApiResponse{ + Data: data, + } + + returnOk(w, r, response) +} + +func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) + publicDashboardId := v.checkValidatorDashboardPublicId(vars["public_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + fetchedId, err := h.dai.GetValidatorDashboardIdByPublicId(r.Context(), publicDashboardId) + if err != nil { + handleErr(w, r, err) + return + } + if *fetchedId != dashboardId { + handleErr(w, r, newNotFoundErr("public id %v not found", publicDashboardId)) + return + } + + err = 
h.dai.RemoveValidatorDashboardPublicId(r.Context(), publicDashboardId) + if err != nil { + handleErr(w, r, err) + return + } + + returnNoContent(w, r) +} + +func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + type request struct { + IsArchived bool `json:"is_archived"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + if v.hasErrors() { + handleErr(w, r, v) + return + } + + // check conditions for changing archival status + dashboardInfo, err := h.dai.GetValidatorDashboardInfo(r.Context(), dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + if dashboardInfo.IsArchived == req.IsArchived { + // nothing to do + returnOk(w, r, types.ApiDataResponse[types.VDBPostArchivingReturnData]{ + Data: types.VDBPostArchivingReturnData{Id: uint64(dashboardId), IsArchived: req.IsArchived}, + }) + return + } + + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + dashboardCount, err := h.dai.GetUserValidatorDashboardCount(r.Context(), userId, !req.IsArchived) + if err != nil { + handleErr(w, r, err) + return + } + + userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + if !isUserAdmin(userInfo) { + if req.IsArchived { + if dashboardCount >= maxArchivedDashboardsCount { + returnConflict(w, r, errors.New("maximum number of archived validator dashboards reached")) + return + } + } else { + if dashboardCount >= userInfo.PremiumPerks.ValidatorDasboards { + returnConflict(w, r, errors.New("maximum number of active validator dashboards reached")) + return + } + if dashboardInfo.GroupCount >= userInfo.PremiumPerks.ValidatorGroupsPerDashboard { + returnConflict(w, r, errors.New("maximum number of groups in dashboards reached")) + return + } + if dashboardInfo.ValidatorCount 
>= userInfo.PremiumPerks.ValidatorsPerDashboard { + returnConflict(w, r, errors.New("maximum number of validators in dashboards reached")) + return + } + } + } + + data, err := h.dai.UpdateValidatorDashboardArchiving(r.Context(), dashboardId, req.IsArchived) + if err != nil { + handleErr(w, r, err) + return + } + response := types.ApiDataResponse[types.VDBPostArchivingReturnData]{ + Data: *data, + } + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + + groupIds := v.checkExistingGroupIdList(r.URL.Query().Get("group_ids")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.dai.GetValidatorDashboardSlotViz(r.Context(), *dashboardId, groupIds) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardSlotVizResponse{ + Data: data, + } + + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBSummaryColumn](&v, q.Get("sort")) + protocolModes := v.checkProtocolModes(q.Get("modes")) + + period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") + // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h + allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + v.checkEnumIsAllowed(period, allowedPeriods, "period") + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := 
h.dai.GetValidatorDashboardSummary(r.Context(), *dashboardId, period, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardSummaryResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + q := r.URL.Query() + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + if err != nil { + handleErr(w, r, err) + return + } + groupId := v.checkGroupId(vars["group_id"], forbidEmpty) + period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") + // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h + allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + v.checkEnumIsAllowed(period, allowedPeriods, "period") + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, err := h.dai.GetValidatorDashboardGroupSummary(r.Context(), *dashboardId, groupId, period, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardGroupSummaryResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + ctx := r.Context() + dashboardId, err := h.handleDashboardId(ctx, mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + groupIds := v.checkGroupIdList(q.Get("group_ids")) + efficiencyType := checkEnum[enums.VDBSummaryChartEfficiencyType](&v, 
q.Get("efficiency_type"), "efficiency_type") + + aggregation := checkEnum[enums.ChartAggregation](&v, r.URL.Query().Get("aggregation"), "aggregation") + chartLimits, err := h.getCurrentChartTimeLimitsForDashboard(ctx, dashboardId, aggregation) + if err != nil { + handleErr(w, r, err) + return + } + afterTs, beforeTs := v.checkTimestamps(r, chartLimits) + if v.hasErrors() { + handleErr(w, r, v) + return + } + if afterTs < chartLimits.MinAllowedTs || beforeTs < chartLimits.MinAllowedTs { + returnConflict(w, r, fmt.Errorf("requested time range is too old, minimum timestamp for dashboard owner's premium subscription for this aggregation is %v", chartLimits.MinAllowedTs)) + return + } + + data, err := h.dai.GetValidatorDashboardSummaryChart(ctx, *dashboardId, groupIds, efficiencyType, aggregation, afterTs, beforeTs) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardSummaryChartResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + groupId := v.checkGroupId(r.URL.Query().Get("group_id"), allowEmpty) + q := r.URL.Query() + duty := checkEnum[enums.ValidatorDuty](&v, q.Get("duty"), "duty") + period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") + // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h + allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + v.checkEnumIsAllowed(period, allowedPeriods, "period") + if v.hasErrors() { + handleErr(w, r, v) + return + } + + // get indices based on duty + var indices interface{} + duties := enums.ValidatorDuties + switch duty { + case duties.None: + indices, err = 
h.dai.GetValidatorDashboardSummaryValidators(r.Context(), *dashboardId, groupId) + case duties.Sync: + indices, err = h.dai.GetValidatorDashboardSyncSummaryValidators(r.Context(), *dashboardId, groupId, period) + case duties.Slashed: + indices, err = h.dai.GetValidatorDashboardSlashingsSummaryValidators(r.Context(), *dashboardId, groupId, period) + case duties.Proposal: + indices, err = h.dai.GetValidatorDashboardProposalSummaryValidators(r.Context(), *dashboardId, groupId, period) + } + if err != nil { + handleErr(w, r, err) + return + } + // map indices to response format + data, err := mapVDBIndices(indices) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.GetValidatorDashboardSummaryValidatorsResponse{ + Data: data, + } + + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBRewardsColumn](&v, q.Get("sort")) + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardRewards(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardRewardsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + vars := mux.Vars(r) + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + groupId 
:= v.checkGroupId(vars["group_id"], forbidEmpty) + epoch := v.checkUint(vars["epoch"], "epoch") + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, err := h.dai.GetValidatorDashboardGroupRewards(r.Context(), *dashboardId, groupId, epoch, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardGroupRewardsResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + vars := mux.Vars(r) + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, err := h.dai.GetValidatorDashboardRewardsChart(r.Context(), *dashboardId, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardRewardsChartResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + vars := mux.Vars(r) + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + groupId := v.checkGroupId(q.Get("group_id"), allowEmpty) + epoch := v.checkUint(vars["epoch"], "epoch") + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBDutiesColumn](&v, q.Get("sort")) + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardDuties(r.Context(), *dashboardId, epoch, groupId, pagingParams.cursor, *sort, pagingParams.search, 
pagingParams.limit, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardDutiesResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBBlocksColumn](&v, q.Get("sort")) + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardBlocks(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardBlocksResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + protocolModes := v.checkProtocolModes(q.Get("modes")) + aggregation := checkEnum[enums.ChartAggregation](&v, r.URL.Query().Get("aggregation"), "aggregation") + chartLimits, err := h.getCurrentChartTimeLimitsForDashboard(r.Context(), dashboardId, aggregation) + if err != nil { + handleErr(w, r, err) + return + } + afterTs, beforeTs := v.checkTimestamps(r, chartLimits) + if v.hasErrors() { + handleErr(w, r, v) + return + } + if afterTs < chartLimits.MinAllowedTs || beforeTs < chartLimits.MinAllowedTs { + returnConflict(w, r, fmt.Errorf("requested time range is too old, minimum timestamp for dashboard 
owner's premium subscription for this aggregation is %v", chartLimits.MinAllowedTs)) + return + } + + data, err := h.dai.GetValidatorDashboardHeatmap(r.Context(), *dashboardId, protocolModes, aggregation, afterTs, beforeTs) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardHeatmapResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + vars := mux.Vars(r) + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + groupId := v.checkExistingGroupId(vars["group_id"]) + requestedTimestamp := v.checkUint(vars["timestamp"], "timestamp") + protocolModes := v.checkProtocolModes(r.URL.Query().Get("modes")) + aggregation := checkEnum[enums.ChartAggregation](&v, r.URL.Query().Get("aggregation"), "aggregation") + if v.hasErrors() { + handleErr(w, r, v) + return + } + chartLimits, err := h.getCurrentChartTimeLimitsForDashboard(r.Context(), dashboardId, aggregation) + if err != nil { + handleErr(w, r, err) + return + } + if requestedTimestamp < chartLimits.MinAllowedTs || requestedTimestamp > chartLimits.LatestExportedTs { + handleErr(w, r, newConflictErr("requested timestamp is outside of allowed chart history for dashboard owner's premium subscription")) + return + } + + data, err := h.dai.GetValidatorDashboardGroupHeatmap(r.Context(), *dashboardId, groupId, protocolModes, aggregation, requestedTimestamp) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardGroupHeatmapResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), 
mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + pagingParams := v.checkPagingParams(r.URL.Query()) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardExecutionLayerDepositsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + pagingParams := v.checkPagingParams(r.URL.Query()) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.GetValidatorDashboardConsensusLayerDepositsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { + var err error + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + data, err := h.dai.GetValidatorDashboardTotalClDeposits(r.Context(), *dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.GetValidatorDashboardTotalConsensusDepositsResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { + var err error + dashboardId, err := 
h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + data, err := h.dai.GetValidatorDashboardTotalElDeposits(r.Context(), *dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.GetValidatorDashboardTotalExecutionDepositsResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBWithdrawalsColumn](&v, q.Get("sort")) + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardWithdrawals(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardWithdrawalsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.ResponseWriter, r *http.Request) { + var v validationError + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + pagingParams := v.checkPagingParams(q) + protocolModes := v.checkProtocolModes(q.Get("modes")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, err := h.dai.GetValidatorDashboardTotalWithdrawals(r.Context(), *dashboardId, pagingParams.search, protocolModes) + if err != nil { + handleErr(w, r, err) + return + } + + response := types.GetValidatorDashboardTotalWithdrawalsResponse{ + Data: *data, + } + returnOk(w, r, 
response) } func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBRocketPoolColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardRocketPool(r.Context(), *dashboardId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardRocketPoolResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + pagingParams := v.checkPagingParams(q) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, err := h.dai.GetValidatorDashboardTotalRocketPool(r.Context(), *dashboardId, pagingParams.search) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardTotalRocketPoolResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + vars := mux.Vars(r) + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + // support ENS names ? 
+ nodeAddress := v.checkAddress(vars["node_address"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, err := h.dai.GetValidatorDashboardNodeRocketPool(r.Context(), *dashboardId, nodeAddress) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardNodeRocketPoolResponse{ + Data: *data, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) { - returnOk(w, r, nil) + var v validationError + vars := mux.Vars(r) + q := r.URL.Query() + dashboardId, err := h.handleDashboardId(r.Context(), vars["dashboard_id"]) + if err != nil { + handleErr(w, r, err) + return + } + // support ENS names ? + nodeAddress := v.checkAddress(vars["node_address"]) + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.VDBRocketPoolMinipoolsColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + + data, paging, err := h.dai.GetValidatorDashboardRocketPoolMinipools(r.Context(), *dashboardId, nodeAddress, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.GetValidatorDashboardRocketPoolMinipoolsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) } func (h *HandlerService) PublicGetNetworkValidators(w http.ResponseWriter, r *http.Request) { diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index e3ce14420..d303080a3 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -265,7 +265,7 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte endpoints := []endpoint{ {http.MethodGet, "/{dashboard_id}", hs.PublicGetValidatorDashboard, hs.InternalGetValidatorDashboard}, - {http.MethodPut, "/{dashboard_id}/name", nil, hs.InternalPutValidatorDashboardName}, + {http.MethodPut, "/{dashboard_id}/name", hs.PublicPutValidatorDashboardName, 
hs.InternalPutValidatorDashboardName}, {http.MethodPost, "/{dashboard_id}/groups", hs.PublicPostValidatorDashboardGroups, hs.InternalPostValidatorDashboardGroups}, {http.MethodPut, "/{dashboard_id}/groups/{group_id}", hs.PublicPutValidatorDashboardGroups, hs.InternalPutValidatorDashboardGroups}, {http.MethodDelete, "/{dashboard_id}/groups/{group_id}", hs.PublicDeleteValidatorDashboardGroup, hs.InternalDeleteValidatorDashboardGroup}, @@ -277,7 +277,7 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte {http.MethodDelete, "/{dashboard_id}/public-ids/{public_id}", hs.PublicDeleteValidatorDashboardPublicId, hs.InternalDeleteValidatorDashboardPublicId}, {http.MethodGet, "/{dashboard_id}/slot-viz", hs.PublicGetValidatorDashboardSlotViz, hs.InternalGetValidatorDashboardSlotViz}, {http.MethodGet, "/{dashboard_id}/summary", hs.PublicGetValidatorDashboardSummary, hs.InternalGetValidatorDashboardSummary}, - {http.MethodGet, "/{dashboard_id}/summary/validators", nil, hs.InternalGetValidatorDashboardSummaryValidators}, + {http.MethodGet, "/{dashboard_id}/summary/validators", hs.PublicGetValidatorDashboardSummaryValidators, hs.InternalGetValidatorDashboardSummaryValidators}, {http.MethodGet, "/{dashboard_id}/groups/{group_id}/summary", hs.PublicGetValidatorDashboardGroupSummary, hs.InternalGetValidatorDashboardGroupSummary}, {http.MethodGet, "/{dashboard_id}/summary-chart", hs.PublicGetValidatorDashboardSummaryChart, hs.InternalGetValidatorDashboardSummaryChart}, {http.MethodGet, "/{dashboard_id}/rewards", hs.PublicGetValidatorDashboardRewards, hs.InternalGetValidatorDashboardRewards}, @@ -289,10 +289,10 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte {http.MethodGet, "/{dashboard_id}/groups/{group_id}/heatmap/{timestamp}", hs.PublicGetValidatorDashboardGroupHeatmap, hs.InternalGetValidatorDashboardGroupHeatmap}, {http.MethodGet, "/{dashboard_id}/execution-layer-deposits", 
hs.PublicGetValidatorDashboardExecutionLayerDeposits, hs.InternalGetValidatorDashboardExecutionLayerDeposits}, {http.MethodGet, "/{dashboard_id}/consensus-layer-deposits", hs.PublicGetValidatorDashboardConsensusLayerDeposits, hs.InternalGetValidatorDashboardConsensusLayerDeposits}, - {http.MethodGet, "/{dashboard_id}/total-execution-layer-deposits", nil, hs.InternalGetValidatorDashboardTotalExecutionLayerDeposits}, - {http.MethodGet, "/{dashboard_id}/total-consensus-layer-deposits", nil, hs.InternalGetValidatorDashboardTotalConsensusLayerDeposits}, + {http.MethodGet, "/{dashboard_id}/total-execution-layer-deposits", hs.PublicGetValidatorDashboardTotalExecutionLayerDeposits, hs.InternalGetValidatorDashboardTotalExecutionLayerDeposits}, + {http.MethodGet, "/{dashboard_id}/total-consensus-layer-deposits", hs.PublicGetValidatorDashboardTotalConsensusLayerDeposits, hs.InternalGetValidatorDashboardTotalConsensusLayerDeposits}, {http.MethodGet, "/{dashboard_id}/withdrawals", hs.PublicGetValidatorDashboardWithdrawals, hs.InternalGetValidatorDashboardWithdrawals}, - {http.MethodGet, "/{dashboard_id}/total-withdrawals", nil, hs.InternalGetValidatorDashboardTotalWithdrawals}, + {http.MethodGet, "/{dashboard_id}/total-withdrawals", hs.PublicGetValidatorDashboardTotalWithdrawals, hs.InternalGetValidatorDashboardTotalWithdrawals}, {http.MethodGet, "/{dashboard_id}/rocket-pool", hs.PublicGetValidatorDashboardRocketPool, hs.InternalGetValidatorDashboardRocketPool}, {http.MethodGet, "/{dashboard_id}/total-rocket-pool", hs.PublicGetValidatorDashboardTotalRocketPool, hs.InternalGetValidatorDashboardTotalRocketPool}, {http.MethodGet, "/{dashboard_id}/rocket-pool/{node_address}", hs.PublicGetValidatorDashboardNodeRocketPool, hs.InternalGetValidatorDashboardNodeRocketPool}, diff --git a/backend/pkg/api/types/slot_viz.go b/backend/pkg/api/types/slot_viz.go index c8d32c1e6..7f1fd3cb0 100644 --- a/backend/pkg/api/types/slot_viz.go +++ b/backend/pkg/api/types/slot_viz.go @@ -42,4 +42,4 @@ 
type SlotVizEpoch struct { Slots []VDBSlotVizSlot `json:"slots,omitempty" faker:"slice_len=32"` // only on dashboard page } -type InternalGetValidatorDashboardSlotVizResponse ApiDataResponse[[]SlotVizEpoch] +type GetValidatorDashboardSlotVizResponse ApiDataResponse[[]SlotVizEpoch] diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go index c24781631..cef626e57 100644 --- a/backend/pkg/api/types/validator_dashboard.go +++ b/backend/pkg/api/types/validator_dashboard.go @@ -37,7 +37,7 @@ type VDBOverviewData struct { Balances VDBOverviewBalances `json:"balances"` } -type InternalGetValidatorDashboardResponse ApiDataResponse[VDBOverviewData] +type GetValidatorDashboardResponse ApiDataResponse[VDBOverviewData] type VDBPostArchivingReturnData struct { Id uint64 `db:"id" json:"id"` @@ -68,7 +68,7 @@ type VDBSummaryTableRow struct { Proposals StatusCount `json:"proposals"` Reward ClElValue[decimal.Decimal] `json:"reward" faker:"cl_el_eth"` } -type InternalGetValidatorDashboardSummaryResponse ApiPagingResponse[VDBSummaryTableRow] +type GetValidatorDashboardSummaryResponse ApiPagingResponse[VDBSummaryTableRow] type VDBGroupSummaryColumnItem struct { StatusCount StatusCount `json:"status_count"` @@ -108,9 +108,9 @@ type VDBGroupSummaryData struct { Collateral float64 `json:"collateral"` } `json:"rocket_pool,omitempty"` } -type InternalGetValidatorDashboardGroupSummaryResponse ApiDataResponse[VDBGroupSummaryData] +type GetValidatorDashboardGroupSummaryResponse ApiDataResponse[VDBGroupSummaryData] -type InternalGetValidatorDashboardSummaryChartResponse ApiDataResponse[ChartData[int, float64]] // line chart, series id is group id +type GetValidatorDashboardSummaryChartResponse ApiDataResponse[ChartData[int, float64]] // line chart, series id is group id // ------------------------------------------------------------ // Summary Validators @@ -123,7 +123,7 @@ type VDBSummaryValidatorsData struct { Validators []VDBSummaryValidator 
`json:"validators"` } -type InternalGetValidatorDashboardSummaryValidatorsResponse ApiDataResponse[[]VDBSummaryValidatorsData] +type GetValidatorDashboardSummaryValidatorsResponse ApiDataResponse[[]VDBSummaryValidatorsData] // ------------------------------------------------------------ // Rewards Tab @@ -141,7 +141,7 @@ type VDBRewardsTableRow struct { Reward ClElValue[decimal.Decimal] `json:"reward"` } -type InternalGetValidatorDashboardRewardsResponse ApiPagingResponse[VDBRewardsTableRow] +type GetValidatorDashboardRewardsResponse ApiPagingResponse[VDBRewardsTableRow] type VDBGroupRewardsDetails struct { StatusCount StatusCount `json:"status_count"` @@ -161,9 +161,9 @@ type VDBGroupRewardsData struct { ProposalClSyncIncReward decimal.Decimal `json:"proposal_cl_sync_inc_reward"` ProposalClSlashingIncReward decimal.Decimal `json:"proposal_cl_slashing_inc_reward"` } -type InternalGetValidatorDashboardGroupRewardsResponse ApiDataResponse[VDBGroupRewardsData] +type GetValidatorDashboardGroupRewardsResponse ApiDataResponse[VDBGroupRewardsData] -type InternalGetValidatorDashboardRewardsChartResponse ApiDataResponse[ChartData[int, decimal.Decimal]] // bar chart, series id is group id, property is 'el' or 'cl' +type GetValidatorDashboardRewardsChartResponse ApiDataResponse[ChartData[int, decimal.Decimal]] // bar chart, series id is group id, property is 'el' or 'cl' // Duties Modal @@ -171,7 +171,7 @@ type VDBEpochDutiesTableRow struct { Validator uint64 `json:"validator"` Duties ValidatorHistoryDuties `json:"duties"` } -type InternalGetValidatorDashboardDutiesResponse ApiPagingResponse[VDBEpochDutiesTableRow] +type GetValidatorDashboardDutiesResponse ApiPagingResponse[VDBEpochDutiesTableRow] // ------------------------------------------------------------ // Blocks Tab @@ -186,7 +186,7 @@ type VDBBlocksTableRow struct { Reward *ClElValue[decimal.Decimal] `json:"reward,omitempty"` Graffiti *string `json:"graffiti,omitempty"` } -type 
InternalGetValidatorDashboardBlocksResponse ApiPagingResponse[VDBBlocksTableRow] +type GetValidatorDashboardBlocksResponse ApiPagingResponse[VDBBlocksTableRow] // ------------------------------------------------------------ // Heatmap Tab @@ -209,7 +209,7 @@ type VDBHeatmap struct { Data []VDBHeatmapCell `json:"data"` Aggregation string `json:"aggregation" tstype:"'epoch' | 'hourly' | 'daily' | 'weekly'" faker:"oneof: epoch, hourly, daily, weekly"` } -type InternalGetValidatorDashboardHeatmapResponse ApiDataResponse[VDBHeatmap] +type GetValidatorDashboardHeatmapResponse ApiDataResponse[VDBHeatmap] type VDBHeatmapTooltipData struct { Timestamp int64 `json:"timestamp"` @@ -224,7 +224,7 @@ type VDBHeatmapTooltipData struct { AttestationIncome decimal.Decimal `json:"attestation_income"` AttestationEfficiency float64 `json:"attestation_efficiency"` } -type InternalGetValidatorDashboardGroupHeatmapResponse ApiDataResponse[VDBHeatmapTooltipData] +type GetValidatorDashboardGroupHeatmapResponse ApiDataResponse[VDBHeatmapTooltipData] // ------------------------------------------------------------ // Deposits Tab @@ -241,7 +241,7 @@ type VDBExecutionDepositsTableRow struct { Amount decimal.Decimal `json:"amount"` Valid bool `json:"valid"` } -type InternalGetValidatorDashboardExecutionLayerDepositsResponse ApiPagingResponse[VDBExecutionDepositsTableRow] +type GetValidatorDashboardExecutionLayerDepositsResponse ApiPagingResponse[VDBExecutionDepositsTableRow] type VDBConsensusDepositsTableRow struct { PublicKey PubKey `json:"public_key"` @@ -253,19 +253,19 @@ type VDBConsensusDepositsTableRow struct { Amount decimal.Decimal `json:"amount"` Signature Hash `json:"signature"` } -type InternalGetValidatorDashboardConsensusLayerDepositsResponse ApiPagingResponse[VDBConsensusDepositsTableRow] +type GetValidatorDashboardConsensusLayerDepositsResponse ApiPagingResponse[VDBConsensusDepositsTableRow] type VDBTotalExecutionDepositsData struct { TotalAmount decimal.Decimal 
`json:"total_amount"` } -type InternalGetValidatorDashboardTotalExecutionDepositsResponse ApiDataResponse[VDBTotalExecutionDepositsData] +type GetValidatorDashboardTotalExecutionDepositsResponse ApiDataResponse[VDBTotalExecutionDepositsData] type VDBTotalConsensusDepositsData struct { TotalAmount decimal.Decimal `json:"total_amount"` } -type InternalGetValidatorDashboardTotalConsensusDepositsResponse ApiDataResponse[VDBTotalConsensusDepositsData] +type GetValidatorDashboardTotalConsensusDepositsResponse ApiDataResponse[VDBTotalConsensusDepositsData] // ------------------------------------------------------------ // Withdrawals Tab @@ -278,13 +278,13 @@ type VDBWithdrawalsTableRow struct { Amount decimal.Decimal `json:"amount"` IsMissingEstimate bool `json:"is_missing_estimate"` } -type InternalGetValidatorDashboardWithdrawalsResponse ApiPagingResponse[VDBWithdrawalsTableRow] +type GetValidatorDashboardWithdrawalsResponse ApiPagingResponse[VDBWithdrawalsTableRow] type VDBTotalWithdrawalsData struct { TotalAmount decimal.Decimal `json:"total_amount"` } -type InternalGetValidatorDashboardTotalWithdrawalsResponse ApiDataResponse[VDBTotalWithdrawalsData] +type GetValidatorDashboardTotalWithdrawalsResponse ApiDataResponse[VDBTotalWithdrawalsData] // ------------------------------------------------------------ // Rocket Pool Tab @@ -315,9 +315,9 @@ type VDBRocketPoolTableRow struct { Unclaimed decimal.Decimal `json:"unclaimed"` } `json:"smoothing_pool"` } -type InternalGetValidatorDashboardRocketPoolResponse ApiPagingResponse[VDBRocketPoolTableRow] +type GetValidatorDashboardRocketPoolResponse ApiPagingResponse[VDBRocketPoolTableRow] -type InternalGetValidatorDashboardTotalRocketPoolResponse ApiDataResponse[VDBRocketPoolTableRow] +type GetValidatorDashboardTotalRocketPoolResponse ApiDataResponse[VDBRocketPoolTableRow] type VDBNodeRocketPoolData struct { Timezone string `json:"timezone"` @@ -329,7 +329,7 @@ type VDBNodeRocketPoolData struct { } `json:"rpl_stake"` } -type 
InternalGetValidatorDashboardNodeRocketPoolResponse ApiDataResponse[VDBNodeRocketPoolData] +type GetValidatorDashboardNodeRocketPoolResponse ApiDataResponse[VDBNodeRocketPoolData] type VDBRocketPoolMinipoolsTableRow struct { Node Address `json:"node"` @@ -342,7 +342,7 @@ type VDBRocketPoolMinipoolsTableRow struct { CreatedTimestamp int64 `json:"created_timestamp"` Penalties uint64 `json:"penalties"` } -type InternalGetValidatorDashboardRocketPoolMinipoolsResponse ApiPagingResponse[VDBRocketPoolMinipoolsTableRow] +type GetValidatorDashboardRocketPoolMinipoolsResponse ApiPagingResponse[VDBRocketPoolMinipoolsTableRow] // ------------------------------------------------------------ // Manage Modal @@ -356,7 +356,7 @@ type VDBManageValidatorsTableRow struct { WithdrawalCredential Hash `json:"withdrawal_credential"` } -type InternalGetValidatorDashboardValidatorsResponse ApiPagingResponse[VDBManageValidatorsTableRow] +type GetValidatorDashboardValidatorsResponse ApiPagingResponse[VDBManageValidatorsTableRow] // ------------------------------------------------------------ // Misc. 
diff --git a/frontend/types/api/slot_viz.ts b/frontend/types/api/slot_viz.ts index 3bc44e3c1..7d9bbc4f1 100644 --- a/frontend/types/api/slot_viz.ts +++ b/frontend/types/api/slot_viz.ts @@ -46,4 +46,4 @@ export interface SlotVizEpoch { progress?: number /* float64 */; // only on landing page slots?: VDBSlotVizSlot[]; // only on dashboard page } -export type InternalGetValidatorDashboardSlotVizResponse = ApiDataResponse; +export type GetValidatorDashboardSlotVizResponse = ApiDataResponse; diff --git a/frontend/types/api/validator_dashboard.ts b/frontend/types/api/validator_dashboard.ts index 4158ecc0c..af31ad96f 100644 --- a/frontend/types/api/validator_dashboard.ts +++ b/frontend/types/api/validator_dashboard.ts @@ -36,7 +36,7 @@ export interface VDBOverviewData { chart_history_seconds: ChartHistorySeconds; balances: VDBOverviewBalances; } -export type InternalGetValidatorDashboardResponse = ApiDataResponse; +export type GetValidatorDashboardResponse = ApiDataResponse; export interface VDBPostArchivingReturnData { id: number /* uint64 */; is_archived: boolean; @@ -61,7 +61,7 @@ export interface VDBSummaryTableRow { proposals: StatusCount; reward: ClElValue; } -export type InternalGetValidatorDashboardSummaryResponse = ApiPagingResponse; +export type GetValidatorDashboardSummaryResponse = ApiPagingResponse; export interface VDBGroupSummaryColumnItem { status_count: StatusCount; validators?: number /* uint64 */[]; @@ -94,8 +94,8 @@ export interface VDBGroupSummaryData { collateral: number /* float64 */; }; } -export type InternalGetValidatorDashboardGroupSummaryResponse = ApiDataResponse; -export type InternalGetValidatorDashboardSummaryChartResponse = ApiDataResponse>; // line chart, series id is group id +export type GetValidatorDashboardGroupSummaryResponse = ApiDataResponse; +export type GetValidatorDashboardSummaryChartResponse = ApiDataResponse>; // line chart, series id is group id /** * ------------------------------------------------------------ * Summary 
Validators @@ -108,7 +108,7 @@ export interface VDBSummaryValidatorsData { category: 'deposited' | 'online' | 'offline' | 'slashing' | 'slashed' | 'exited' | 'withdrawn' | 'pending' | 'exiting' | 'withdrawing' | 'sync_current' | 'sync_upcoming' | 'sync_past' | 'has_slashed' | 'got_slashed' | 'proposal_proposed' | 'proposal_missed'; validators: VDBSummaryValidator[]; } -export type InternalGetValidatorDashboardSummaryValidatorsResponse = ApiDataResponse; +export type GetValidatorDashboardSummaryValidatorsResponse = ApiDataResponse; /** * ------------------------------------------------------------ * Rewards Tab @@ -125,7 +125,7 @@ export interface VDBRewardsTableRow { group_id: number /* int64 */; reward: ClElValue; } -export type InternalGetValidatorDashboardRewardsResponse = ApiPagingResponse; +export type GetValidatorDashboardRewardsResponse = ApiPagingResponse; export interface VDBGroupRewardsDetails { status_count: StatusCount; income: string /* decimal.Decimal */; @@ -143,13 +143,13 @@ export interface VDBGroupRewardsData { proposal_cl_sync_inc_reward: string /* decimal.Decimal */; proposal_cl_slashing_inc_reward: string /* decimal.Decimal */; } -export type InternalGetValidatorDashboardGroupRewardsResponse = ApiDataResponse; -export type InternalGetValidatorDashboardRewardsChartResponse = ApiDataResponse>; // bar chart, series id is group id, property is 'el' or 'cl' +export type GetValidatorDashboardGroupRewardsResponse = ApiDataResponse; +export type GetValidatorDashboardRewardsChartResponse = ApiDataResponse>; // bar chart, series id is group id, property is 'el' or 'cl' export interface VDBEpochDutiesTableRow { validator: number /* uint64 */; duties: ValidatorHistoryDuties; } -export type InternalGetValidatorDashboardDutiesResponse = ApiPagingResponse; +export type GetValidatorDashboardDutiesResponse = ApiPagingResponse; /** * ------------------------------------------------------------ * Blocks Tab @@ -165,7 +165,7 @@ export interface VDBBlocksTableRow { 
reward?: ClElValue; graffiti?: string; } -export type InternalGetValidatorDashboardBlocksResponse = ApiPagingResponse; +export type GetValidatorDashboardBlocksResponse = ApiPagingResponse; export interface VDBHeatmapEvents { proposal: boolean; slash: boolean; @@ -183,7 +183,7 @@ export interface VDBHeatmap { data: VDBHeatmapCell[]; aggregation: 'epoch' | 'hourly' | 'daily' | 'weekly'; } -export type InternalGetValidatorDashboardHeatmapResponse = ApiDataResponse; +export type GetValidatorDashboardHeatmapResponse = ApiDataResponse; export interface VDBHeatmapTooltipData { timestamp: number /* int64 */; proposers: StatusCount; @@ -195,7 +195,7 @@ export interface VDBHeatmapTooltipData { attestation_income: string /* decimal.Decimal */; attestation_efficiency: number /* float64 */; } -export type InternalGetValidatorDashboardGroupHeatmapResponse = ApiDataResponse; +export type GetValidatorDashboardGroupHeatmapResponse = ApiDataResponse; /** * ------------------------------------------------------------ * Deposits Tab @@ -213,7 +213,7 @@ export interface VDBExecutionDepositsTableRow { amount: string /* decimal.Decimal */; valid: boolean; } -export type InternalGetValidatorDashboardExecutionLayerDepositsResponse = ApiPagingResponse; +export type GetValidatorDashboardExecutionLayerDepositsResponse = ApiPagingResponse; export interface VDBConsensusDepositsTableRow { public_key: PubKey; index: number /* uint64 */; @@ -224,15 +224,15 @@ export interface VDBConsensusDepositsTableRow { amount: string /* decimal.Decimal */; signature: Hash; } -export type InternalGetValidatorDashboardConsensusLayerDepositsResponse = ApiPagingResponse; +export type GetValidatorDashboardConsensusLayerDepositsResponse = ApiPagingResponse; export interface VDBTotalExecutionDepositsData { total_amount: string /* decimal.Decimal */; } -export type InternalGetValidatorDashboardTotalExecutionDepositsResponse = ApiDataResponse; +export type GetValidatorDashboardTotalExecutionDepositsResponse = 
ApiDataResponse; export interface VDBTotalConsensusDepositsData { total_amount: string /* decimal.Decimal */; } -export type InternalGetValidatorDashboardTotalConsensusDepositsResponse = ApiDataResponse; +export type GetValidatorDashboardTotalConsensusDepositsResponse = ApiDataResponse; /** * ------------------------------------------------------------ * Withdrawals Tab @@ -246,11 +246,11 @@ export interface VDBWithdrawalsTableRow { amount: string /* decimal.Decimal */; is_missing_estimate: boolean; } -export type InternalGetValidatorDashboardWithdrawalsResponse = ApiPagingResponse; +export type GetValidatorDashboardWithdrawalsResponse = ApiPagingResponse; export interface VDBTotalWithdrawalsData { total_amount: string /* decimal.Decimal */; } -export type InternalGetValidatorDashboardTotalWithdrawalsResponse = ApiDataResponse; +export type GetValidatorDashboardTotalWithdrawalsResponse = ApiDataResponse; /** * ------------------------------------------------------------ * Rocket Pool Tab @@ -282,8 +282,8 @@ export interface VDBRocketPoolTableRow { unclaimed: string /* decimal.Decimal */; }; } -export type InternalGetValidatorDashboardRocketPoolResponse = ApiPagingResponse; -export type InternalGetValidatorDashboardTotalRocketPoolResponse = ApiDataResponse; +export type GetValidatorDashboardRocketPoolResponse = ApiPagingResponse; +export type GetValidatorDashboardTotalRocketPoolResponse = ApiDataResponse; export interface VDBNodeRocketPoolData { timezone: string; refund_balance: string /* decimal.Decimal */; @@ -293,7 +293,7 @@ export interface VDBNodeRocketPoolData { max: string /* decimal.Decimal */; }; } -export type InternalGetValidatorDashboardNodeRocketPoolResponse = ApiDataResponse; +export type GetValidatorDashboardNodeRocketPoolResponse = ApiDataResponse; export interface VDBRocketPoolMinipoolsTableRow { node: Address; validator_index: number /* uint64 */; @@ -305,7 +305,7 @@ export interface VDBRocketPoolMinipoolsTableRow { created_timestamp: number /* 
int64 */; penalties: number /* uint64 */; } -export type InternalGetValidatorDashboardRocketPoolMinipoolsResponse = ApiPagingResponse; +export type GetValidatorDashboardRocketPoolMinipoolsResponse = ApiPagingResponse; /** * ------------------------------------------------------------ * Manage Modal @@ -319,7 +319,7 @@ export interface VDBManageValidatorsTableRow { queue_position?: number /* uint64 */; withdrawal_credential: Hash; } -export type InternalGetValidatorDashboardValidatorsResponse = ApiPagingResponse; +export type GetValidatorDashboardValidatorsResponse = ApiPagingResponse; /** * ------------------------------------------------------------ * Misc. From 7ae09ef121deeab73cbe0c7ef60b494743e61b7f Mon Sep 17 00:00:00 2001 From: LUCCA DUKIC <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 08:07:58 +0200 Subject: [PATCH 038/187] refactor: rename api types See: BEDS-143 --- .../components/dashboard/ValidatorEpochDutiesModal.vue | 6 +++--- .../components/dashboard/ValidatorManagementModal.vue | 6 +++--- frontend/components/dashboard/chart/RewardsChart.vue | 4 ++-- frontend/components/dashboard/chart/SummaryChart.vue | 4 ++-- .../validator/subset/ValidatorSubsetModal.vue | 4 ++-- frontend/pages/playground.vue | 6 +++--- .../dashboard/useValidatorDashboardBlocksStore.ts | 6 +++--- .../dashboard/useValidatorDashboardClDepositsStore.ts | 10 +++++----- .../dashboard/useValidatorDashboardElDepositsStore.ts | 10 +++++----- .../dashboard/useValidatorDashboardOverviewStore.ts | 4 ++-- .../useValidatorDashboardRewardsDetailsStore.ts | 4 ++-- .../dashboard/useValidatorDashboardRewardsStore.ts | 6 +++--- .../useValidatorDashboardSummaryDetailsStore.ts | 4 ++-- .../dashboard/useValidatorDashboardSummaryStore.ts | 6 +++--- .../dashboard/useValidatorDashboardWithdrawalsStore.ts | 10 +++++----- frontend/stores/dashboard/useValidatorSlotVizStore.ts | 4 ++-- 16 files changed, 47 insertions(+), 47 deletions(-) diff --git 
a/frontend/components/dashboard/ValidatorEpochDutiesModal.vue b/frontend/components/dashboard/ValidatorEpochDutiesModal.vue index 3d54c7e37..cba3e9356 100644 --- a/frontend/components/dashboard/ValidatorEpochDutiesModal.vue +++ b/frontend/components/dashboard/ValidatorEpochDutiesModal.vue @@ -2,7 +2,7 @@ import type { DataTableSortEvent } from 'primevue/datatable' import type { DashboardKey } from '~/types/dashboard' import type { Cursor } from '~/types/datatable' -import type { InternalGetValidatorDashboardDutiesResponse } from '~/types/api/validator_dashboard' +import type { GetValidatorDashboardDutiesResponse } from '~/types/api/validator_dashboard' import type { ValidatorHistoryDuties } from '~/types/api/common' import type { PathValues } from '~/types/customFetch' import { API_PATH } from '~/types/customFetch' @@ -46,7 +46,7 @@ const { 500, ) -const data = ref() +const data = ref() const onSort = (sort: DataTableSortEvent) => { setQuery(setQuerySort(sort, query?.value)) @@ -70,7 +70,7 @@ const loadData = async () => { if (props.value?.dashboardKey) { isLoading.value = !data.value const testQ = JSON.stringify(query.value) - const result = await fetch( + const result = await fetch( API_PATH.DASHBOARD_VALIDATOR_EPOCH_DUTY, { query: { diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/ValidatorManagementModal.vue index e93e59b08..c7bd58024 100644 --- a/frontend/components/dashboard/ValidatorManagementModal.vue +++ b/frontend/components/dashboard/ValidatorManagementModal.vue @@ -13,7 +13,7 @@ import { } from '#components' import { useValidatorDashboardOverviewStore } from '~/stores/dashboard/useValidatorDashboardOverviewStore' import type { - InternalGetValidatorDashboardValidatorsResponse, + GetValidatorDashboardValidatorsResponse, VDBManageValidatorsTableRow, VDBPostValidatorsData, } from '~/types/api/validator_dashboard' @@ -72,7 +72,7 @@ const { value: query, } = useDebounceValue(initialQuery, 500) -const 
data = ref() +const data = ref() const selected = ref() const searchBar = ref() const hasNoOpenDialogs = ref(true) @@ -265,7 +265,7 @@ watch(selectedGroup, (value) => { const loadData = async () => { if (dashboardKey.value) { const testQ = JSON.stringify(query.value) - const result = await fetch( + const result = await fetch( API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT, undefined, { dashboardKey: dashboardKey.value }, diff --git a/frontend/components/dashboard/chart/RewardsChart.vue b/frontend/components/dashboard/chart/RewardsChart.vue index 6f8fcb194..5b0925655 100644 --- a/frontend/components/dashboard/chart/RewardsChart.vue +++ b/frontend/components/dashboard/chart/RewardsChart.vue @@ -22,7 +22,7 @@ import { getRewardChartColors, getRewardsChartLineColor, } from '~/utils/colors' -import { type InternalGetValidatorDashboardRewardsChartResponse } from '~/types/api/validator_dashboard' +import { type GetValidatorDashboardRewardsChartResponse } from '~/types/api/validator_dashboard' import { type ChartData } from '~/types/api/common' import { type RewardChartGroupData, @@ -73,7 +73,7 @@ useAsyncData( return } isLoading.value = true - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_VALIDATOR_REWARDS_CHART, undefined, { dashboardKey: dashboardKey.value }, diff --git a/frontend/components/dashboard/chart/SummaryChart.vue b/frontend/components/dashboard/chart/SummaryChart.vue index 7f255c095..927468a3b 100644 --- a/frontend/components/dashboard/chart/SummaryChart.vue +++ b/frontend/components/dashboard/chart/SummaryChart.vue @@ -20,7 +20,7 @@ import { getChartTooltipBackgroundColor, getSummaryChartGroupColors, } from '~/utils/colors' -import { type InternalGetValidatorDashboardSummaryChartResponse } from '~/types/api/validator_dashboard' +import { type GetValidatorDashboardSummaryChartResponse } from '~/types/api/validator_dashboard' import { getGroupLabel } from '~/utils/dashboard/group' import { formatTsToTime } from '~/utils/format' import { 
API_PATH } from '~/types/customFetch' @@ -173,7 +173,7 @@ const loadData = async () => { isLoading.value = true const newSeries: SeriesObject[] = [] try { - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_SUMMARY_CHART, { query: { diff --git a/frontend/components/dashboard/validator/subset/ValidatorSubsetModal.vue b/frontend/components/dashboard/validator/subset/ValidatorSubsetModal.vue index 6817b10d6..539f9f71b 100644 --- a/frontend/components/dashboard/validator/subset/ValidatorSubsetModal.vue +++ b/frontend/components/dashboard/validator/subset/ValidatorSubsetModal.vue @@ -14,7 +14,7 @@ import type { import { sortSummaryValidators } from '~/utils/dashboard/validator' import { API_PATH } from '~/types/customFetch' import { - type InternalGetValidatorDashboardSummaryValidatorsResponse, + type GetValidatorDashboardSummaryValidatorsResponse, type VDBGroupSummaryData, type VDBSummaryTableRow, type VDBSummaryValidator, @@ -84,7 +84,7 @@ watch( } const res - = await fetch( + = await fetch( API_PATH.DASHBOARD_VALIDATOR_INDICES, { query: { diff --git a/frontend/pages/playground.vue b/frontend/pages/playground.vue index 8bf23d5cc..f8895143d 100644 --- a/frontend/pages/playground.vue +++ b/frontend/pages/playground.vue @@ -9,7 +9,7 @@ import { } from '#components' import { useLatestStateStore } from '~/stores/useLatestStateStore' import { - type InternalGetValidatorDashboardSlotVizResponse, + type GetValidatorDashboardSlotVizResponse, type SlotVizEpoch, } from '~/types/api/slot_viz' import { formatNumber } from '~/utils/format' @@ -28,7 +28,7 @@ const { refreshOverview } = useValidatorDashboardOverviewStore() await Promise.all([ useAsyncData('latest_state', () => refreshLatestState()), useAsyncData('test_slot_viz_data', async () => { - const res = await $fetch( + const res = await $fetch( './mock/dashboard/slotViz.json', ) slotVizData.value = res.data @@ -39,7 +39,7 @@ await Promise.all([ ]) onMounted(async () => { - const res = await $fetch( + 
const res = await $fetch( './mock/dashboard/slotViz.json', ) slotVizData.value = res.data diff --git a/frontend/stores/dashboard/useValidatorDashboardBlocksStore.ts b/frontend/stores/dashboard/useValidatorDashboardBlocksStore.ts index 430745036..552496899 100644 --- a/frontend/stores/dashboard/useValidatorDashboardBlocksStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardBlocksStore.ts @@ -1,5 +1,5 @@ import { defineStore } from 'pinia' -import type { InternalGetValidatorDashboardBlocksResponse } from '~/types/api/validator_dashboard' +import type { GetValidatorDashboardBlocksResponse } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' import type { TableQueryParams } from '~/types/datatable' import { API_PATH } from '~/types/customFetch' @@ -7,7 +7,7 @@ import { API_PATH } from '~/types/customFetch' const validatorDashboardBlocksStore = defineStore( 'validator_dashboard_blocks_store', () => { - const data = ref() + const data = ref() const query = ref() return { @@ -41,7 +41,7 @@ export function useValidatorDashboardBlocksStore() { } isLoading.value = true storedQuery.value = query - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_VALIDATOR_BLOCKS, undefined, { dashboardKey }, diff --git a/frontend/stores/dashboard/useValidatorDashboardClDepositsStore.ts b/frontend/stores/dashboard/useValidatorDashboardClDepositsStore.ts index aa65b151f..27390c344 100644 --- a/frontend/stores/dashboard/useValidatorDashboardClDepositsStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardClDepositsStore.ts @@ -1,7 +1,7 @@ import { defineStore } from 'pinia' import type { - InternalGetValidatorDashboardConsensusLayerDepositsResponse, - InternalGetValidatorDashboardTotalConsensusDepositsResponse, + GetValidatorDashboardConsensusLayerDepositsResponse, + GetValidatorDashboardTotalConsensusDepositsResponse, } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' 
import type { TableQueryParams } from '~/types/datatable' @@ -11,7 +11,7 @@ const validatorDashboardClDepositsStore = defineStore( 'validator_dashboard_cl_deposits_store', () => { const data - = ref() + = ref() const total = ref() const query = ref() @@ -50,7 +50,7 @@ export function useValidatorDashboardClDepositsStore() { storedQuery.value = query isLoadingDeposits.value = true const res - = await fetch( + = await fetch( API_PATH.DASHBOARD_CL_DEPOSITS, undefined, { dashboardKey }, @@ -74,7 +74,7 @@ export function useValidatorDashboardClDepositsStore() { } isLoadingTotal.value = true const res - = await fetch( + = await fetch( API_PATH.DASHBOARD_CL_DEPOSITS_TOTAL, undefined, { dashboardKey }, diff --git a/frontend/stores/dashboard/useValidatorDashboardElDepositsStore.ts b/frontend/stores/dashboard/useValidatorDashboardElDepositsStore.ts index fde440ebd..5326e5cca 100644 --- a/frontend/stores/dashboard/useValidatorDashboardElDepositsStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardElDepositsStore.ts @@ -1,7 +1,7 @@ import { defineStore } from 'pinia' import type { - InternalGetValidatorDashboardExecutionLayerDepositsResponse, - InternalGetValidatorDashboardTotalExecutionDepositsResponse, + GetValidatorDashboardExecutionLayerDepositsResponse, + GetValidatorDashboardTotalExecutionDepositsResponse, } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' import type { TableQueryParams } from '~/types/datatable' @@ -11,7 +11,7 @@ const validatorDashboardElDepositsStore = defineStore( 'validator_dashboard_el_deposits_store', () => { const data - = ref() + = ref() const total = ref() const query = ref() @@ -50,7 +50,7 @@ export function useValidatorDashboardElDepositsStore() { storedQuery.value = query isLoadingDeposits.value = true const res - = await fetch( + = await fetch( API_PATH.DASHBOARD_EL_DEPOSITS, undefined, { dashboardKey }, @@ -74,7 +74,7 @@ export function useValidatorDashboardElDepositsStore() { } 
isLoadingTotal.value = true const res - = await fetch( + = await fetch( API_PATH.DASHBOARD_EL_DEPOSITS_TOTAL, undefined, { dashboardKey }, diff --git a/frontend/stores/dashboard/useValidatorDashboardOverviewStore.ts b/frontend/stores/dashboard/useValidatorDashboardOverviewStore.ts index a94480747..54ed02295 100644 --- a/frontend/stores/dashboard/useValidatorDashboardOverviewStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardOverviewStore.ts @@ -1,7 +1,7 @@ import { defineStore } from 'pinia' import { useAllValidatorDashboardRewardsDetailsStore } from './useValidatorDashboardRewardsDetailsStore' import type { - InternalGetValidatorDashboardResponse, + GetValidatorDashboardResponse, VDBOverviewData, } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' @@ -26,7 +26,7 @@ export function useValidatorDashboardOverviewStore() { return } try { - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_OVERVIEW, undefined, { dashboardKey: key }, diff --git a/frontend/stores/dashboard/useValidatorDashboardRewardsDetailsStore.ts b/frontend/stores/dashboard/useValidatorDashboardRewardsDetailsStore.ts index 2a6fdc4bf..95fa3cdc8 100644 --- a/frontend/stores/dashboard/useValidatorDashboardRewardsDetailsStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardRewardsDetailsStore.ts @@ -1,6 +1,6 @@ import { defineStore } from 'pinia' import type { - InternalGetValidatorDashboardGroupRewardsResponse, + GetValidatorDashboardGroupRewardsResponse, VDBGroupRewardsData, } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' @@ -32,7 +32,7 @@ export const useValidatorDashboardRewardsDetailsStore = ( if (data.value[getKey()]) { return data.value[getKey()] } - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_VALIDATOR_REWARDS_DETAILS, undefined, { diff --git a/frontend/stores/dashboard/useValidatorDashboardRewardsStore.ts 
b/frontend/stores/dashboard/useValidatorDashboardRewardsStore.ts index 94640bd07..506e96389 100644 --- a/frontend/stores/dashboard/useValidatorDashboardRewardsStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardRewardsStore.ts @@ -1,5 +1,5 @@ import { defineStore } from 'pinia' -import type { InternalGetValidatorDashboardRewardsResponse } from '~/types/api/validator_dashboard' +import type { GetValidatorDashboardRewardsResponse } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' import type { TableQueryParams } from '~/types/datatable' import { DAHSHBOARDS_NEXT_EPOCH_ID } from '~/types/dashboard' @@ -8,7 +8,7 @@ import { API_PATH } from '~/types/customFetch' const validatorDashboardRewardsStore = defineStore( 'validator_dashboard_rewards_store', () => { - const data = ref() + const data = ref() const query = ref() return { @@ -44,7 +44,7 @@ export function useValidatorDashboardRewardsStore() { } isLoading.value = true storedQuery.value = query - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_VALIDATOR_REWARDS, undefined, { dashboardKey }, diff --git a/frontend/stores/dashboard/useValidatorDashboardSummaryDetailsStore.ts b/frontend/stores/dashboard/useValidatorDashboardSummaryDetailsStore.ts index aab2b2533..3a908dc38 100644 --- a/frontend/stores/dashboard/useValidatorDashboardSummaryDetailsStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardSummaryDetailsStore.ts @@ -1,6 +1,6 @@ import { defineStore } from 'pinia' import type { - InternalGetValidatorDashboardGroupSummaryResponse, + GetValidatorDashboardGroupSummaryResponse, VDBGroupSummaryData, } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' @@ -42,7 +42,7 @@ export function useValidatorDashboardSummaryDetailsStore( data.value = {} storeTimeFrame.value = timeFrame } - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_SUMMARY_DETAILS, { query: { period: timeFrame 
} }, { diff --git a/frontend/stores/dashboard/useValidatorDashboardSummaryStore.ts b/frontend/stores/dashboard/useValidatorDashboardSummaryStore.ts index 1d0c44502..81ea17a75 100644 --- a/frontend/stores/dashboard/useValidatorDashboardSummaryStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardSummaryStore.ts @@ -1,5 +1,5 @@ import { defineStore } from 'pinia' -import type { InternalGetValidatorDashboardSummaryResponse } from '~/types/api/validator_dashboard' +import type { GetValidatorDashboardSummaryResponse } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' import type { TableQueryParams } from '~/types/datatable' import { API_PATH } from '~/types/customFetch' @@ -8,7 +8,7 @@ import type { SummaryTimeFrame } from '~/types/dashboard/summary' const validatorDashboardSummaryStore = defineStore( 'validator_dashboard_sumary_store', () => { - const data = ref() + const data = ref() const query = ref() return { @@ -45,7 +45,7 @@ export function useValidatorDashboardSummaryStore() { isLoading.value = true storedQuery.value = query - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_SUMMARY, { query: { period: timeFrame } }, { dashboardKey }, diff --git a/frontend/stores/dashboard/useValidatorDashboardWithdrawalsStore.ts b/frontend/stores/dashboard/useValidatorDashboardWithdrawalsStore.ts index 98df02667..f76b3d90a 100644 --- a/frontend/stores/dashboard/useValidatorDashboardWithdrawalsStore.ts +++ b/frontend/stores/dashboard/useValidatorDashboardWithdrawalsStore.ts @@ -1,7 +1,7 @@ import { defineStore } from 'pinia' import type { - InternalGetValidatorDashboardTotalWithdrawalsResponse, - InternalGetValidatorDashboardWithdrawalsResponse, + GetValidatorDashboardTotalWithdrawalsResponse, + GetValidatorDashboardWithdrawalsResponse, } from '~/types/api/validator_dashboard' import type { DashboardKey } from '~/types/dashboard' import type { TableQueryParams } from '~/types/datatable' @@ -10,7 +10,7 @@ 
import { API_PATH } from '~/types/customFetch' const validatorDashboardWithdrawalsStore = defineStore( 'validator_dashboard_withdrawals', () => { - const data = ref() + const data = ref() const total = ref() const query = ref() @@ -49,7 +49,7 @@ export function useValidatorDashboardWithdrawalsStore() { storedQuery.value = query isLoadingWithdrawals.value = true - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_VALIDATOR_WITHDRAWALS, undefined, { dashboardKey }, @@ -74,7 +74,7 @@ export function useValidatorDashboardWithdrawalsStore() { isLoadingTotal.value = true const res - = await fetch( + = await fetch( API_PATH.DASHBOARD_VALIDATOR_TOTAL_WITHDRAWALS, undefined, { dashboardKey }, diff --git a/frontend/stores/dashboard/useValidatorSlotVizStore.ts b/frontend/stores/dashboard/useValidatorSlotVizStore.ts index c1e825ea4..2d3f76ee2 100644 --- a/frontend/stores/dashboard/useValidatorSlotVizStore.ts +++ b/frontend/stores/dashboard/useValidatorSlotVizStore.ts @@ -1,6 +1,6 @@ import { defineStore } from 'pinia' import type { - InternalGetValidatorDashboardSlotVizResponse, + GetValidatorDashboardSlotVizResponse, SlotVizEpoch, } from '~/types/api/slot_viz' import type { DashboardKey } from '~/types/dashboard' @@ -22,7 +22,7 @@ export function useValidatorSlotVizStore() { if (groups?.length) { query = { group_ids: groups.join(',') } } - const res = await fetch( + const res = await fetch( API_PATH.DASHBOARD_SLOTVIZ, { headers: {}, From 8b191f98e44fc7d79f8959ebfdcf7a8cfe0ed77b Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 08:14:35 +0200 Subject: [PATCH 039/187] (BEDS-347) refactor dummy (#774) --- backend/pkg/api/data_access/dummy.go | 489 ++++++++------------------- 1 file changed, 133 insertions(+), 356 deletions(-) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index ea5b3f05d..6f538dc95 100644 --- a/backend/pkg/api/data_access/dummy.go +++ 
b/backend/pkg/api/data_access/dummy.go @@ -56,50 +56,59 @@ func commonFakeData(a interface{}) error { return faker.FakeData(a, options.WithRandomMapAndSliceMaxSize(5)) } +// used for any non-pointer data, e.g. all primitive types or slices +func getDummyData[T any]() (T, error) { + var r T + err := commonFakeData(&r) + return r, err +} + +// used for any struct data that should be returned as a pointer +func getDummyStruct[T any]() (*T, error) { + var r T + err := commonFakeData(&r) + return &r, err +} + +// used for any table data that should be returned with paging +func getDummyWithPaging[T any]() ([]T, *t.Paging, error) { + r := []T{} + p := t.Paging{} + _ = commonFakeData(&r) + err := commonFakeData(&p) + return r, &p, err +} + func (d *DummyService) Close() { // nothing to close } func (d *DummyService) GetLatestSlot() (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetLatestFinalizedEpoch() (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetLatestBlock() (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetBlockHeightAt(slot uint64) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetLatestExchangeRates() ([]t.EthConversionRate, error) { - r := []t.EthConversionRate{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.EthConversionRate]() } func (d *DummyService) GetUserByEmail(ctx context.Context, email string) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) CreateUser(ctx context.Context, email, password string) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return 
getDummyData[uint64]() } func (d *DummyService) RemoveUser(ctx context.Context, userId uint64) error { @@ -115,15 +124,11 @@ func (d *DummyService) UpdateUserPassword(ctx context.Context, userId uint64, pa } func (d *DummyService) GetEmailConfirmationTime(ctx context.Context, userId uint64) (time.Time, error) { - r := time.Time{} - err := commonFakeData(&r) - return r, err + return getDummyData[time.Time]() } func (d *DummyService) GetPasswordResetTime(ctx context.Context, userId uint64) (time.Time, error) { - r := time.Time{} - err := commonFakeData(&r) - return r, err + return getDummyData[time.Time]() } func (d *DummyService) UpdateEmailConfirmationTime(ctx context.Context, userId uint64) error { @@ -147,96 +152,66 @@ func (d *DummyService) UpdatePasswordResetHash(ctx context.Context, userId uint6 } func (d *DummyService) GetUserInfo(ctx context.Context, userId uint64) (*t.UserInfo, error) { - r := t.UserInfo{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.UserInfo]() } func (d *DummyService) GetUserCredentialInfo(ctx context.Context, userId uint64) (*t.UserCredentialInfo, error) { - r := t.UserCredentialInfo{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.UserCredentialInfo]() } func (d *DummyService) GetUserIdByApiKey(ctx context.Context, apiKey string) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetUserIdByConfirmationHash(ctx context.Context, hash string) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetUserIdByResetHash(ctx context.Context, hash string) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetProductSummary(ctx context.Context) (*t.ProductSummary, error) { - r := t.ProductSummary{} - err := commonFakeData(&r) - return &r, err + 
return getDummyStruct[t.ProductSummary]() } func (d *DummyService) GetFreeTierPerks(ctx context.Context) (*t.PremiumPerks, error) { - r := t.PremiumPerks{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.PremiumPerks]() } func (d *DummyService) GetValidatorDashboardUser(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.DashboardUser, error) { - r := t.DashboardUser{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.DashboardUser]() } func (d *DummyService) GetValidatorDashboardIdByPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) (*t.VDBIdPrimary, error) { - var r t.VDBIdPrimary - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBIdPrimary]() } func (d *DummyService) GetValidatorDashboardInfo(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.ValidatorDashboard, error) { - r := t.ValidatorDashboard{} + r, err := getDummyStruct[t.ValidatorDashboard]() // return semi-valid data to not break staging - //nolint:errcheck - commonFakeData(&r) r.IsArchived = false - return &r, nil + return r, err } func (d *DummyService) GetValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary) (string, error) { - r := "" - err := commonFakeData(&r) - return r, err + return getDummyData[string]() } func (d *DummyService) GetValidatorsFromSlices(indices []uint64, publicKeys []string) ([]t.VDBValidator, error) { - r := []t.VDBValidator{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.VDBValidator]() } func (d *DummyService) GetUserDashboards(ctx context.Context, userId uint64) (*t.UserDashboardsData, error) { - r := t.UserDashboardsData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.UserDashboardsData]() } func (d *DummyService) CreateValidatorDashboard(ctx context.Context, userId uint64, name string, network uint64) (*t.VDBPostReturnData, error) { - r := t.VDBPostReturnData{} - err := commonFakeData(&r) - return &r, err + return 
getDummyStruct[t.VDBPostReturnData]() } func (d *DummyService) GetValidatorDashboardOverview(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes) (*t.VDBOverviewData, error) { - r := t.VDBOverviewData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBOverviewData]() } func (d *DummyService) RemoveValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary) error { @@ -244,27 +219,19 @@ func (d *DummyService) RemoveValidatorDashboard(ctx context.Context, dashboardId } func (d *DummyService) UpdateValidatorDashboardArchiving(ctx context.Context, dashboardId t.VDBIdPrimary, archived bool) (*t.VDBPostArchivingReturnData, error) { - r := t.VDBPostArchivingReturnData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPostArchivingReturnData]() } func (d *DummyService) UpdateValidatorDashboardName(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostReturnData, error) { - r := t.VDBPostReturnData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPostReturnData]() } func (d *DummyService) CreateValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, name string) (*t.VDBPostCreateGroupData, error) { - r := t.VDBPostCreateGroupData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPostCreateGroupData]() } func (d *DummyService) UpdateValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, name string) (*t.VDBPostCreateGroupData, error) { - r := t.VDBPostCreateGroupData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPostCreateGroupData]() } func (d *DummyService) RemoveValidatorDashboardGroup(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64) error { @@ -276,41 +243,27 @@ func (d *DummyService) GetValidatorDashboardGroupExists(ctx context.Context, das } func (d *DummyService) GetValidatorDashboardExistingValidatorCount(ctx 
context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) { - r := []t.VDBPostValidatorsData{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.VDBPostValidatorsData]() } func (d *DummyService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - r := []t.VDBPostValidatorsData{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.VDBPostValidatorsData]() } func (d *DummyService) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) { - r := []t.VDBPostValidatorsData{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.VDBPostValidatorsData]() } func (d *DummyService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) { - r := []t.VDBPostValidatorsData{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.VDBPostValidatorsData]() } func (d *DummyService) GetValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, cursor string, colSort t.Sort[enums.VDBManageValidatorsColumn], search string, limit uint64) ([]t.VDBManageValidatorsTableRow, *t.Paging, error) { - r := []t.VDBManageValidatorsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBManageValidatorsTableRow]() } func (d *DummyService) 
RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) error { @@ -318,21 +271,15 @@ func (d *DummyService) RemoveValidatorDashboardValidators(ctx context.Context, d } func (d *DummyService) CreateValidatorDashboardPublicId(ctx context.Context, dashboardId t.VDBIdPrimary, name string, shareGroups bool) (*t.VDBPublicId, error) { - r := t.VDBPublicId{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPublicId]() } func (d *DummyService) GetValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) (*t.VDBPublicId, error) { - r := t.VDBPublicId{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPublicId]() } func (d *DummyService) UpdateValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic, name string, shareGroups bool) (*t.VDBPublicId, error) { - r := t.VDBPublicId{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBPublicId]() } func (d *DummyService) RemoveValidatorDashboardPublicId(ctx context.Context, publicDashboardId t.VDBIdPublic) error { @@ -348,293 +295,175 @@ func (d *DummyService) GetValidatorDashboardSlotViz(ctx context.Context, dashboa } func (d *DummyService) GetValidatorDashboardSummary(ctx context.Context, dashboardId t.VDBId, period enums.TimePeriod, cursor string, colSort t.Sort[enums.VDBSummaryColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBSummaryTableRow, *t.Paging, error) { - r := []t.VDBSummaryTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBSummaryTableRow]() } func (d *DummyService) GetValidatorDashboardGroupSummary(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod, protocolModes t.VDBProtocolModes) (*t.VDBGroupSummaryData, error) { - r := t.VDBGroupSummaryData{} - err := commonFakeData(&r) - return &r, err + return 
getDummyStruct[t.VDBGroupSummaryData]() } func (d *DummyService) GetValidatorDashboardSummaryChart(ctx context.Context, dashboardId t.VDBId, groupIds []int64, efficiency enums.VDBSummaryChartEfficiencyType, aggregation enums.ChartAggregation, afterTs uint64, beforeTs uint64) (*t.ChartData[int, float64], error) { - r := t.ChartData[int, float64]{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.ChartData[int, float64]]() } func (d *DummyService) GetValidatorDashboardSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64) (*t.VDBGeneralSummaryValidators, error) { - r := t.VDBGeneralSummaryValidators{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBGeneralSummaryValidators]() } func (d *DummyService) GetValidatorDashboardSyncSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod) (*t.VDBSyncSummaryValidators, error) { - r := t.VDBSyncSummaryValidators{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBSyncSummaryValidators]() } func (d *DummyService) GetValidatorDashboardSlashingsSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod) (*t.VDBSlashingsSummaryValidators, error) { - r := t.VDBSlashingsSummaryValidators{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBSlashingsSummaryValidators]() } func (d *DummyService) GetValidatorDashboardProposalSummaryValidators(ctx context.Context, dashboardId t.VDBId, groupId int64, period enums.TimePeriod) (*t.VDBProposalSummaryValidators, error) { - r := t.VDBProposalSummaryValidators{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBProposalSummaryValidators]() } func (d *DummyService) GetValidatorDashboardRewards(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBRewardsColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBRewardsTableRow, 
*t.Paging, error) { - r := []t.VDBRewardsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBRewardsTableRow]() } func (d *DummyService) GetValidatorDashboardGroupRewards(ctx context.Context, dashboardId t.VDBId, groupId int64, epoch uint64, protocolModes t.VDBProtocolModes) (*t.VDBGroupRewardsData, error) { - r := t.VDBGroupRewardsData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBGroupRewardsData]() } func (d *DummyService) GetValidatorDashboardRewardsChart(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes) (*t.ChartData[int, decimal.Decimal], error) { - r := t.ChartData[int, decimal.Decimal]{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.ChartData[int, decimal.Decimal]]() } func (d *DummyService) GetValidatorDashboardDuties(ctx context.Context, dashboardId t.VDBId, epoch uint64, groupId int64, cursor string, colSort t.Sort[enums.VDBDutiesColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBEpochDutiesTableRow, *t.Paging, error) { - r := []t.VDBEpochDutiesTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBEpochDutiesTableRow]() } func (d *DummyService) GetValidatorDashboardBlocks(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBBlocksColumn], search string, limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBBlocksTableRow, *t.Paging, error) { - r := []t.VDBBlocksTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBBlocksTableRow]() } func (d *DummyService) GetValidatorDashboardHeatmap(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes, aggregation enums.ChartAggregation, afterTs uint64, beforeTs uint64) (*t.VDBHeatmap, error) { - r := t.VDBHeatmap{} - 
err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBHeatmap]() } func (d *DummyService) GetValidatorDashboardGroupHeatmap(ctx context.Context, dashboardId t.VDBId, groupId uint64, protocolModes t.VDBProtocolModes, aggregation enums.ChartAggregation, timestamp uint64) (*t.VDBHeatmapTooltipData, error) { - r := t.VDBHeatmapTooltipData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBHeatmapTooltipData]() } func (d *DummyService) GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) { - r := []t.VDBExecutionDepositsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBExecutionDepositsTableRow]() } func (d *DummyService) GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) { - r := []t.VDBConsensusDepositsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBConsensusDepositsTableRow]() } func (d *DummyService) GetValidatorDashboardTotalElDeposits(ctx context.Context, dashboardId t.VDBId) (*t.VDBTotalExecutionDepositsData, error) { - r := t.VDBTotalExecutionDepositsData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBTotalExecutionDepositsData]() } func (d *DummyService) GetValidatorDashboardTotalClDeposits(ctx context.Context, dashboardId t.VDBId) (*t.VDBTotalConsensusDepositsData, error) { - r := t.VDBTotalConsensusDepositsData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBTotalConsensusDepositsData]() } func (d *DummyService) GetValidatorDashboardWithdrawals(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBWithdrawalsColumn], search string, 
limit uint64, protocolModes t.VDBProtocolModes) ([]t.VDBWithdrawalsTableRow, *t.Paging, error) { - r := []t.VDBWithdrawalsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return []t.VDBWithdrawalsTableRow{}, &t.Paging{}, nil } func (d *DummyService) GetValidatorDashboardTotalWithdrawals(ctx context.Context, dashboardId t.VDBId, search string, protocolModes t.VDBProtocolModes) (*t.VDBTotalWithdrawalsData, error) { - r := t.VDBTotalWithdrawalsData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBTotalWithdrawalsData]() } func (d *DummyService) GetValidatorDashboardRocketPool(ctx context.Context, dashboardId t.VDBId, cursor string, colSort t.Sort[enums.VDBRocketPoolColumn], search string, limit uint64) ([]t.VDBRocketPoolTableRow, *t.Paging, error) { - r := []t.VDBRocketPoolTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.VDBRocketPoolTableRow]() } func (d *DummyService) GetValidatorDashboardTotalRocketPool(ctx context.Context, dashboardId t.VDBId, search string) (*t.VDBRocketPoolTableRow, error) { - r := t.VDBRocketPoolTableRow{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBRocketPoolTableRow]() } func (d *DummyService) GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) { - r := t.VDBNodeRocketPoolData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.VDBNodeRocketPoolData]() } func (d *DummyService) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node string, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) { - r := []t.VDBRocketPoolMinipoolsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + 
return getDummyWithPaging[t.VDBRocketPoolMinipoolsTableRow]() } func (d *DummyService) GetAllNetworks() ([]t.NetworkInfo, error) { - r := []t.NetworkInfo{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.NetworkInfo]() } func (d *DummyService) GetSearchValidatorByIndex(ctx context.Context, chainId, index uint64) (*t.SearchValidator, error) { - r := t.SearchValidator{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.SearchValidator]() } func (d *DummyService) GetSearchValidatorByPublicKey(ctx context.Context, chainId uint64, publicKey []byte) (*t.SearchValidator, error) { - r := t.SearchValidator{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.SearchValidator]() } func (d *DummyService) GetSearchValidatorsByDepositAddress(ctx context.Context, chainId uint64, address []byte) (*t.SearchValidatorsByDepositAddress, error) { - r := t.SearchValidatorsByDepositAddress{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.SearchValidatorsByDepositAddress]() } func (d *DummyService) GetSearchValidatorsByDepositEnsName(ctx context.Context, chainId uint64, ensName string) (*t.SearchValidatorsByDepositEnsName, error) { - r := t.SearchValidatorsByDepositEnsName{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.SearchValidatorsByDepositEnsName]() } func (d *DummyService) GetSearchValidatorsByWithdrawalCredential(ctx context.Context, chainId uint64, credential []byte) (*t.SearchValidatorsByWithdrwalCredential, error) { - r := t.SearchValidatorsByWithdrwalCredential{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.SearchValidatorsByWithdrwalCredential]() } func (d *DummyService) GetSearchValidatorsByWithdrawalEnsName(ctx context.Context, chainId uint64, ensName string) (*t.SearchValidatorsByWithrawalEnsName, error) { - r := t.SearchValidatorsByWithrawalEnsName{} - err := commonFakeData(&r) - return &r, err + return 
getDummyStruct[t.SearchValidatorsByWithrawalEnsName]() } func (d *DummyService) GetSearchValidatorsByGraffiti(ctx context.Context, chainId uint64, graffiti string) (*t.SearchValidatorsByGraffiti, error) { - r := t.SearchValidatorsByGraffiti{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.SearchValidatorsByGraffiti]() } func (d *DummyService) GetUserValidatorDashboardCount(ctx context.Context, userId uint64, active bool) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetValidatorDashboardGroupCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetValidatorDashboardValidatorsCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetValidatorDashboardPublicIdCount(ctx context.Context, dashboardId t.VDBIdPrimary) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetNotificationOverview(ctx context.Context, userId uint64) (*t.NotificationOverviewData, error) { - r := t.NotificationOverviewData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.NotificationOverviewData]() } func (d *DummyService) GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) { - r := []t.NotificationDashboardsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.NotificationDashboardsTableRow]() } func (d *DummyService) 
GetValidatorDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) { - r := t.NotificationValidatorDashboardDetail{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.NotificationValidatorDashboardDetail]() } func (d *DummyService) GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationAccountDashboardDetail, error) { - r := t.NotificationAccountDashboardDetail{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.NotificationAccountDashboardDetail]() } func (d *DummyService) GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) { - r := []t.NotificationMachinesTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.NotificationMachinesTableRow]() } func (d *DummyService) GetClientNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationClientsColumn], search string, limit uint64) ([]t.NotificationClientsTableRow, *t.Paging, error) { - r := []t.NotificationClientsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.NotificationClientsTableRow]() } func (d *DummyService) GetRocketPoolNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationRocketPoolColumn], search string, limit uint64) ([]t.NotificationRocketPoolTableRow, *t.Paging, error) { - r := []t.NotificationRocketPoolTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.NotificationRocketPoolTableRow]() } func (d *DummyService) GetNetworkNotifications(ctx context.Context, userId uint64, cursor string, 
colSort t.Sort[enums.NotificationNetworksColumn], search string, limit uint64) ([]t.NotificationNetworksTableRow, *t.Paging, error) { - r := []t.NotificationNetworksTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) - return r, &p, err + return getDummyWithPaging[t.NotificationNetworksTableRow]() } func (d *DummyService) GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) { - r := t.NotificationSettings{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.NotificationSettings]() } func (d *DummyService) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error { return nil @@ -649,10 +478,7 @@ func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Contex return nil } func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { - r := []t.NotificationSettingsDashboardsTableRow{} - p := t.Paging{} - _ = commonFakeData(&r) - err := commonFakeData(&p) + r, p, err := getDummyWithPaging[t.NotificationSettingsDashboardsTableRow]() for i, n := range r { var settings interface{} if n.IsAccountDashboard { @@ -663,7 +489,7 @@ func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, us _ = commonFakeData(&settings) r[i].Settings = settings } - return r, &p, err + return r, p, err } func (d *DummyService) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error { return nil @@ -676,9 +502,7 @@ func (d *DummyService) CreateAdConfiguration(ctx context.Context, key, jquerySel } func (d *DummyService) GetAdConfigurations(ctx context.Context, keys []string) ([]t.AdConfigurationData, error) { 
- r := []t.AdConfigurationData{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.AdConfigurationData]() } func (d *DummyService) UpdateAdConfiguration(ctx context.Context, key, jquerySelector string, insertMode enums.AdInsertMode, refreshInterval uint64, forAllUsers bool, bannerId uint64, htmlContent string, enabled bool) error { @@ -690,15 +514,11 @@ func (d *DummyService) RemoveAdConfiguration(ctx context.Context, key string) er } func (d *DummyService) GetLatestExportedChartTs(ctx context.Context, aggregation enums.ChartAggregation) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) GetUserIdByRefreshToken(claimUserID, claimAppID, claimDeviceID uint64, hashedRefreshToken string) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) MigrateMobileSession(oldHashedRefreshToken, newHashedRefreshToken, deviceID, deviceName string) error { @@ -706,9 +526,7 @@ func (d *DummyService) MigrateMobileSession(oldHashedRefreshToken, newHashedRefr } func (d *DummyService) GetAppDataFromRedirectUri(callback string) (*t.OAuthAppData, error) { - r := t.OAuthAppData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.OAuthAppData]() } func (d *DummyService) AddUserDevice(userID uint64, hashedRefreshToken string, deviceID, deviceName string, appID uint64) error { @@ -720,9 +538,7 @@ func (d *DummyService) AddMobileNotificationToken(userID uint64, deviceID, notif } func (d *DummyService) GetAppSubscriptionCount(userID uint64) (uint64, error) { - r := uint64(0) - err := commonFakeData(&r) - return r, err + return getDummyData[uint64]() } func (d *DummyService) AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error { @@ -730,121 +546,82 @@ func (d *DummyService) 
AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetai } func (d *DummyService) GetBlockOverview(ctx context.Context, chainId, block uint64) (*t.BlockOverview, error) { - r := t.BlockOverview{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.BlockOverview]() } func (d *DummyService) GetBlockTransactions(ctx context.Context, chainId, block uint64) ([]t.BlockTransactionTableRow, error) { - r := []t.BlockTransactionTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockTransactionTableRow]() } func (d *DummyService) GetBlock(ctx context.Context, chainId, block uint64) (*t.BlockSummary, error) { - r := t.BlockSummary{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.BlockSummary]() } func (d *DummyService) GetBlockVotes(ctx context.Context, chainId, block uint64) ([]t.BlockVoteTableRow, error) { - r := []t.BlockVoteTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockVoteTableRow]() } func (d *DummyService) GetBlockAttestations(ctx context.Context, chainId, block uint64) ([]t.BlockAttestationTableRow, error) { - r := []t.BlockAttestationTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockAttestationTableRow]() } func (d *DummyService) GetBlockWithdrawals(ctx context.Context, chainId, block uint64) ([]t.BlockWithdrawalTableRow, error) { - r := []t.BlockWithdrawalTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockWithdrawalTableRow]() } func (d *DummyService) GetBlockBlsChanges(ctx context.Context, chainId, block uint64) ([]t.BlockBlsChangeTableRow, error) { - r := []t.BlockBlsChangeTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockBlsChangeTableRow]() } func (d *DummyService) GetBlockVoluntaryExits(ctx context.Context, chainId, block uint64) ([]t.BlockVoluntaryExitTableRow, error) { - r := []t.BlockVoluntaryExitTableRow{} - err := commonFakeData(&r) - 
return r, err + return getDummyData[[]t.BlockVoluntaryExitTableRow]() } func (d *DummyService) GetBlockBlobs(ctx context.Context, chainId, block uint64) ([]t.BlockBlobTableRow, error) { - r := []t.BlockBlobTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockBlobTableRow]() } func (d *DummyService) GetSlot(ctx context.Context, chainId, block uint64) (*t.BlockSummary, error) { - r := t.BlockSummary{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.BlockSummary]() } func (d *DummyService) GetSlotOverview(ctx context.Context, chainId, block uint64) (*t.BlockOverview, error) { - r := t.BlockOverview{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.BlockOverview]() } func (d *DummyService) GetSlotTransactions(ctx context.Context, chainId, block uint64) ([]t.BlockTransactionTableRow, error) { - r := []t.BlockTransactionTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockTransactionTableRow]() } func (d *DummyService) GetSlotVotes(ctx context.Context, chainId, block uint64) ([]t.BlockVoteTableRow, error) { - r := []t.BlockVoteTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockVoteTableRow]() } func (d *DummyService) GetSlotAttestations(ctx context.Context, chainId, block uint64) ([]t.BlockAttestationTableRow, error) { - r := []t.BlockAttestationTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockAttestationTableRow]() } func (d *DummyService) GetSlotWithdrawals(ctx context.Context, chainId, block uint64) ([]t.BlockWithdrawalTableRow, error) { - r := []t.BlockWithdrawalTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockWithdrawalTableRow]() } func (d *DummyService) GetSlotBlsChanges(ctx context.Context, chainId, block uint64) ([]t.BlockBlsChangeTableRow, error) { - r := []t.BlockBlsChangeTableRow{} - err := commonFakeData(&r) - return r, err + return 
getDummyData[[]t.BlockBlsChangeTableRow]() } func (d *DummyService) GetSlotVoluntaryExits(ctx context.Context, chainId, block uint64) ([]t.BlockVoluntaryExitTableRow, error) { - r := []t.BlockVoluntaryExitTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockVoluntaryExitTableRow]() } func (d *DummyService) GetSlotBlobs(ctx context.Context, chainId, block uint64) ([]t.BlockBlobTableRow, error) { - r := []t.BlockBlobTableRow{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.BlockBlobTableRow]() } func (d *DummyService) GetRocketPoolOverview(ctx context.Context) (*t.RocketPoolData, error) { - r := t.RocketPoolData{} - err := commonFakeData(&r) - return &r, err + return getDummyStruct[t.RocketPoolData]() } func (d *DummyService) GetHealthz(ctx context.Context, showAll bool) types.HealthzData { - r := types.HealthzData{} - _ = commonFakeData(&r) + r, _ := getDummyData[types.HealthzData]() return r } From ecd852c27869225c0a43f8db112762cccd7e95b9 Mon Sep 17 00:00:00 2001 From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:03:40 +0200 Subject: [PATCH 040/187] Changed to a new entry in the cache --- backend/pkg/api/data_access/vdb_slotviz.go | 19 +------------------ backend/pkg/api/services/service_slot_viz.go | 6 ++++++ 2 files changed, 7 insertions(+), 18 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_slotviz.go b/backend/pkg/api/data_access/vdb_slotviz.go index 875b03214..98d8c60d9 100644 --- a/backend/pkg/api/data_access/vdb_slotviz.go +++ b/backend/pkg/api/data_access/vdb_slotviz.go @@ -34,23 +34,6 @@ func (d *DataAccessService) GetValidatorDashboardSlotViz(ctx context.Context, da return nil, err } - latestProposedSlot := int64(-1) - for slot := dutiesInfo.LatestSlot; ; slot-- { - if _, ok := dutiesInfo.PropAssignmentsForSlot[slot]; ok { - if dutiesInfo.SlotStatus[slot] == 1 { - latestProposedSlot = int64(slot) - break - } - } else { - // No more data available - 
break - } - - if slot == 0 { - break - } - } - epochToIndexMap := make(map[uint64]uint64) slotToIndexMap := make(map[uint64]uint64) @@ -221,7 +204,7 @@ func (d *DataAccessService) GetValidatorDashboardSlotViz(ctx context.Context, da } attestationsRef := slotVizEpochs[epochIdx].Slots[slotIdx].Attestations - if latestProposedSlot == -1 || uint64(slot) >= uint64(latestProposedSlot) { + if uint64(slot) >= dutiesInfo.LatestProposedSlot { if attestationsRef.Scheduled == nil { attestationsRef.Scheduled = &t.VDBSlotVizDuty{} } diff --git a/backend/pkg/api/services/service_slot_viz.go b/backend/pkg/api/services/service_slot_viz.go index e26145445..4631e3ded 100644 --- a/backend/pkg/api/services/service_slot_viz.go +++ b/backend/pkg/api/services/service_slot_viz.go @@ -203,6 +203,9 @@ func (s *Services) updateSlotVizData() error { if duty.Slot > dutiesInfo.LatestSlot { dutiesInfo.LatestSlot = duty.Slot } + if duty.Status == 1 && duty.Slot > dutiesInfo.LatestProposedSlot { + dutiesInfo.LatestProposedSlot = duty.Slot + } dutiesInfo.SlotStatus[duty.Slot] = duty.Status dutiesInfo.SlotBlock[duty.Slot] = duty.Block if duty.Status == 1 { // 1: Proposed @@ -292,6 +295,7 @@ func (s *Services) GetCurrentDutiesInfo() (*SyncData, error) { func (s *Services) initDutiesInfo() *SyncData { dutiesInfo := SyncData{} dutiesInfo.LatestSlot = uint64(0) + dutiesInfo.LatestProposedSlot = uint64(0) dutiesInfo.SlotStatus = make(map[uint64]int8) dutiesInfo.SlotBlock = make(map[uint64]uint64) dutiesInfo.SlotSyncParticipated = make(map[uint64]map[constypes.ValidatorIndex]bool) @@ -318,6 +322,7 @@ func (s *Services) copyAndCleanDutiesInfo() *SyncData { dutiesInfo := &SyncData{ LatestSlot: p.LatestSlot, + LatestProposedSlot: p.LatestProposedSlot, SlotStatus: make(map[uint64]int8, len(p.SlotStatus)), SlotBlock: make(map[uint64]uint64, len(p.SlotBlock)), SlotSyncParticipated: make(map[uint64]map[constypes.ValidatorIndex]bool, len(p.SlotSyncParticipated)), @@ -451,6 +456,7 @@ func (s *Services) 
getMaxValidatorDutiesInfoSlot() uint64 { type SyncData struct { LatestSlot uint64 + LatestProposedSlot uint64 SlotStatus map[uint64]int8 // slot -> status SlotBlock map[uint64]uint64 // slot -> block SlotSyncParticipated map[uint64]map[uint64]bool // slot -> validatorindex -> participated From b4a274f93f0fa2c95f60c2bd36d82873b63ef00a Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 09:22:41 +0000 Subject: [PATCH 041/187] feat(dashboard): integration test scaffolding --- backend/pkg/api/handlers/handlers_test.go | 139 ++++++++++++++++++++++ 1 file changed, 139 insertions(+) create mode 100644 backend/pkg/api/handlers/handlers_test.go diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go new file mode 100644 index 000000000..5cd7128cc --- /dev/null +++ b/backend/pkg/api/handlers/handlers_test.go @@ -0,0 +1,139 @@ +package handlers_test + +import ( + "encoding/json" + "flag" + "io" + "net/http" + "net/http/httptest" + "os" + "testing" + + "github.com/gobitfly/beaconchain/pkg/api" + dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + api_types "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/types" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/commons/version" + "github.com/gorilla/mux" +) + +var router *mux.Router +var dataAccessor dataaccess.DataAccessor + +func TestMain(m *testing.M) { + setup() + defer teardown() + + // wait till service initialization is completed (TODO: find a better way to do this) + // time.Sleep(30 * time.Second) + + os.Exit(m.Run()) +} + +func teardown() { + dataAccessor.Close() +} + +func setup() { + configPath := flag.String("config", "", "Path to the config file, if empty string defaults will be used") + + flag.Parse() + + cfg := &types.Config{} + err := utils.ReadConfig(cfg, 
*configPath) + if err != nil { + log.Fatal(err, "error reading config file", 0) + } + utils.Config = cfg + + log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": utils.Config.Chain.ClConfig.ConfigName}, "starting") + + dataAccessor = dataaccess.NewDataAccessService(cfg) + router = api.NewApiRouter(dataAccessor, cfg) +} + +func TestInternalGetProductSummaryHandler(t *testing.T) { + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/i/product-summary", nil) + + router.ServeHTTP(w, req) + + resp := w.Result() + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected status code 200, got %d", resp.StatusCode) + } + + data, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + + respData := api_types.InternalGetProductSummaryResponse{} + err = json.Unmarshal(data, &respData) + if err != nil { + log.Infof("%s", string(data)) + t.Fatal(err) + } + + if respData.Data.ValidatorsPerDashboardLimit == 0 { + t.Fatal("ValidatorsPerDashboardLimit is 0") + } + + if len(respData.Data.ApiProducts) == 0 { + t.Fatal("ApiProducts length is 0") + } +} + +func TestInternalGetLatestStateHandler(t *testing.T) { + req := httptest.NewRequest(http.MethodGet, "/api/i/latest-state", nil) + + w := httptest.NewRecorder() + + router.ServeHTTP(w, req) + + resp := w.Result() + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("expected status code 200, got %d", resp.StatusCode) + } + + data, err := io.ReadAll(resp.Body) + + if err != nil { + t.Fatal(err) + } + + respData := api_types.InternalGetLatestStateResponse{} + err = json.Unmarshal(data, &respData) + if err != nil { + t.Fatal(err) + } + + if respData.Data.LatestSlot == 0 { + t.Fatal("latest slot is 0") + } + + if respData.Data.FinalizedEpoch == 0 { + t.Fatal("finalized epoch is 0") + } +} + +func TestInternalPostAdConfigurationsHandler(t *testing.T) { + req := 
httptest.NewRequest(http.MethodPost, "/api/i/ad-configurations", nil) + + w := httptest.NewRecorder() + + router.ServeHTTP(w, req) + + resp := w.Result() + defer resp.Body.Close() + + if resp.StatusCode != http.StatusUnauthorized { + t.Fatalf("expected status code 401, got %d", resp.StatusCode) + } +} From c6993dadf05f5ff8b46445b88f38a642093067d5 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 12:15:20 +0200 Subject: [PATCH 042/187] BEDS-393: frontend: fix shared dashboard url --- frontend/components/dashboard/DashboardShareCodeModal.vue | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/frontend/components/dashboard/DashboardShareCodeModal.vue b/frontend/components/dashboard/DashboardShareCodeModal.vue index 64a8e51d5..1f0e5e1bd 100644 --- a/frontend/components/dashboard/DashboardShareCodeModal.vue +++ b/frontend/components/dashboard/DashboardShareCodeModal.vue @@ -34,9 +34,7 @@ const isShared = computed(() => isSharedKey(sharedKey.value)) const path = computed(() => { const newRoute = router.resolve({ name: 'dashboard-id', - query: { - validators: fromBase64Url(sharedKey.value ?? 
''), - }, + params: { id: sharedKey.value }, }) return url.origin + newRoute.fullPath }) From f02c32c608c15bf41e58e5447b0d46437ef3426a Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 11:02:53 +0000 Subject: [PATCH 043/187] feat(api): make writing tests more convenient --- backend/go.mod | 2 + backend/go.sum | 7 +- backend/pkg/api/handlers/handlers_test.go | 163 +++++++++++------- backend/pkg/api/services/service.go | 14 +- .../service_average_network_efficiency.go | 6 +- .../pkg/api/services/service_email_sender.go | 7 +- backend/pkg/api/services/service_slot_viz.go | 6 +- .../api/services/service_validator_mapping.go | 7 +- 8 files changed, 140 insertions(+), 72 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index b82b96ef1..3b622725a 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -61,6 +61,7 @@ require ( github.com/rocket-pool/smartnode v1.13.6 github.com/shopspring/decimal v1.3.1 github.com/sirupsen/logrus v1.9.3 + github.com/stretchr/testify v1.9.0 github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d github.com/wealdtech/go-ens/v3 v3.6.0 github.com/wealdtech/go-eth2-types/v2 v2.8.2 @@ -194,6 +195,7 @@ require ( github.com/paulmach/orb v0.10.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect github.com/polydawn/refmt v0.89.0 // indirect github.com/prometheus/client_model v0.6.0 // indirect github.com/prometheus/common v0.47.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index c41a5f688..f16d0bc61 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -840,8 +840,9 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= 
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -852,8 +853,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go index 5cd7128cc..6e942d3af 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/handlers/handlers_test.go @@ -1,10 +1,12 @@ package handlers_test import ( + "bytes" 
"encoding/json" "flag" "io" "net/http" + "net/http/cookiejar" "net/http/httptest" "os" "testing" @@ -16,12 +18,55 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/commons/version" - "github.com/gorilla/mux" + "github.com/stretchr/testify/assert" ) -var router *mux.Router +var ts *testServer var dataAccessor dataaccess.DataAccessor +type testServer struct { + *httptest.Server +} + +// Implement a get() method on our custom testServer type. This makes a GET +// request to a given url path using the test server client, and returns the +// response status code, headers and body. +func (ts *testServer) get(t *testing.T, urlPath string) (int, http.Header, string) { + rs, err := ts.Client().Get(ts.URL + urlPath) + if err != nil { + t.Fatal(err) + } + defer rs.Body.Close() + body, err := io.ReadAll(rs.Body) + if err != nil { + t.Fatal(err) + } + bytes.TrimSpace(body) + return rs.StatusCode, rs.Header, string(body) +} + +func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, http.Header, string) { + rs, err := ts.Client().Post(ts.URL+urlPath, "application/json", data) + if err != nil { + t.Fatal(err) + } + defer rs.Body.Close() + body, err := io.ReadAll(rs.Body) + if err != nil { + t.Fatal(err) + } + bytes.TrimSpace(body) + return rs.StatusCode, rs.Header, string(body) +} + +func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.ApiErrorResponse { + resp := api_types.ApiErrorResponse{} + if err := json.Unmarshal([]byte(body), &resp); err != nil { + t.Fatal(err) + } + return resp +} + func TestMain(m *testing.M) { setup() defer teardown() @@ -34,6 +79,7 @@ func TestMain(m *testing.M) { func teardown() { dataAccessor.Close() + ts.Close() } func setup() { @@ -51,89 +97,84 @@ func setup() { log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": 
utils.Config.Chain.ClConfig.ConfigName}, "starting") dataAccessor = dataaccess.NewDataAccessService(cfg) - router = api.NewApiRouter(dataAccessor, cfg) -} - -func TestInternalGetProductSummaryHandler(t *testing.T) { - w := httptest.NewRecorder() - req := httptest.NewRequest(http.MethodGet, "/api/i/product-summary", nil) + router := api.NewApiRouter(dataAccessor, cfg) - router.ServeHTTP(w, req) + ts = &testServer{httptest.NewTLSServer(router)} - resp := w.Result() - defer resp.Body.Close() + jar, _ := cookiejar.New(nil) + ts.Server.Client().Jar = jar +} - if resp.StatusCode != http.StatusOK { - t.Fatalf("expected status code 200, got %d", resp.StatusCode) - } +func TestInternalGetProductSummaryHandler(t *testing.T) { + code, _, body := ts.get(t, "/api/i/product-summary") - data, err := io.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } + assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetProductSummaryResponse{} - err = json.Unmarshal(data, &respData) + err := json.Unmarshal([]byte(body), &respData) if err != nil { - log.Infof("%s", string(data)) + log.Infof("%s", body) t.Fatal(err) } - if respData.Data.ValidatorsPerDashboardLimit == 0 { - t.Fatal("ValidatorsPerDashboardLimit is 0") - } - - if len(respData.Data.ApiProducts) == 0 { - t.Fatal("ApiProducts length is 0") - } + assert.NotEqual(t, 0, respData.Data.ValidatorsPerDashboardLimit, "ValidatorsPerDashboardLimit should not be 0") + assert.NotEqual(t, 0, len(respData.Data.ApiProducts), "ApiProducts should not be empty") + assert.NotEqual(t, 0, len(respData.Data.ExtraDashboardValidatorsPremiumAddon), "ExtraDashboardValidatorsPremiumAddon should not be empty") + assert.NotEqual(t, 0, len(respData.Data.PremiumProducts), "PremiumProducts should not be empty") } func TestInternalGetLatestStateHandler(t *testing.T) { - req := httptest.NewRequest(http.MethodGet, "/api/i/latest-state", nil) - - w := httptest.NewRecorder() - - router.ServeHTTP(w, req) - - resp := w.Result() - defer 
resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - t.Fatalf("expected status code 200, got %d", resp.StatusCode) - } - - data, err := io.ReadAll(resp.Body) - - if err != nil { - t.Fatal(err) - } + code, _, body := ts.get(t, "//api/i/latest-state") + assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetLatestStateResponse{} - err = json.Unmarshal(data, &respData) - if err != nil { + if err := json.Unmarshal([]byte(body), &respData); err != nil { t.Fatal(err) } - if respData.Data.LatestSlot == 0 { - t.Fatal("latest slot is 0") - } - - if respData.Data.FinalizedEpoch == 0 { - t.Fatal("finalized epoch is 0") - } + assert.NotEqual(t, uint64(0), respData.Data.LatestSlot, "latest slot should not be 0") + assert.NotEqual(t, uint64(0), respData.Data.FinalizedEpoch, "finalized epoch should not be 0") } func TestInternalPostAdConfigurationsHandler(t *testing.T) { - req := httptest.NewRequest(http.MethodPost, "/api/i/ad-configurations", nil) + code, _, body := ts.get(t, "/api/i/ad-configurations") + assert.Equal(t, http.StatusUnauthorized, code) + + resp := ts.parseErrorResonse(t, body) + assert.Equal(t, "unauthorized: not authenticated", resp.Error) - w := httptest.NewRecorder() + // login + code, _, body = ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin@admin.com", "password": "admin"}`))) + assert.Equal(t, http.StatusNotFound, code) + resp = ts.parseErrorResonse(t, body) + assert.Equal(t, "not found: user not found", resp.Error) +} - router.ServeHTTP(w, req) +func TestInternalLoginHandler(t *testing.T) { + // login with email in wrong format + code, _, body := ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) + assert.Equal(t, http.StatusBadRequest, code) + resp := ts.parseErrorResonse(t, body) + assert.Equal(t, "email: given value 'admin' has incorrect format", resp.Error, "unexpected error message") + + // login with wrong user + code, _, body = ts.post(t, "/api/i/login", 
bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) + assert.Equal(t, http.StatusNotFound, code) + resp = ts.parseErrorResonse(t, body) + assert.Equal(t, "not found: user not found", resp.Error, "unexpected error message") // TODO: this should not return the same error as a user with a wrong password +} - resp := w.Result() - defer resp.Body.Close() +func TestInternalSearchHandler(t *testing.T) { + // search for validator with index 5 + code, _, body := ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"5","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + assert.Equal(t, 200, code) - if resp.StatusCode != http.StatusUnauthorized { - t.Fatalf("expected status code 401, got %d", resp.StatusCode) + resp := api_types.InternalPostSearchResponse{} + if err := json.Unmarshal([]byte(body), &resp); err != nil { + t.Fatal(err) } + + assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") + assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") + assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") } diff --git a/backend/pkg/api/services/service.go b/backend/pkg/api/services/service.go index 2e009e85c..872e3391b 100644 --- a/backend/pkg/api/services/service.go +++ b/backend/pkg/api/services/service.go @@ -1,6 +1,8 @@ package services import ( + "sync" + "github.com/go-redis/redis/v8" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" @@ -35,11 +37,15 @@ func NewServices(readerDb, writerDb, alloyReader, alloyWriter, clickhouseReader } func (s *Services) InitServices() { - go s.startSlotVizDataService() - go s.startIndexMappingService() - go s.startEfficiencyDataService() - go 
s.startEmailSenderService() + wg := &sync.WaitGroup{} + log.Infof("initializing services...") + wg.Add(4) + go s.startSlotVizDataService(wg) + go s.startIndexMappingService(wg) + go s.startEfficiencyDataService(wg) + go s.startEmailSenderService(wg) + wg.Wait() log.Infof("initializing prices...") price.Init(utils.Config.Chain.ClConfig.DepositChainID, utils.Config.Eth1ErigonEndpoint, utils.Config.Frontend.ClCurrency, utils.Config.Frontend.ElCurrency) log.Infof("...prices initialized") diff --git a/backend/pkg/api/services/service_average_network_efficiency.go b/backend/pkg/api/services/service_average_network_efficiency.go index 725ff0511..083b06dc2 100644 --- a/backend/pkg/api/services/service_average_network_efficiency.go +++ b/backend/pkg/api/services/service_average_network_efficiency.go @@ -22,7 +22,8 @@ import ( var currentEfficiencyInfo atomic.Pointer[EfficiencyData] -func (s *Services) startEfficiencyDataService() { +func (s *Services) startEfficiencyDataService(wg *sync.WaitGroup) { + o := sync.Once{} for { startTime := time.Now() delay := time.Duration(utils.Config.Chain.ClConfig.SlotsPerEpoch*utils.Config.Chain.ClConfig.SecondsPerSlot) * time.Second @@ -36,6 +37,9 @@ func (s *Services) startEfficiencyDataService() { } else { log.Infof("=== average network efficiency data updated in %s", time.Since(startTime)) r(constants.Success, map[string]string{"took": time.Since(startTime).String()}) + o.Do(func() { + wg.Done() + }) } utils.ConstantTimeDelay(startTime, delay) } diff --git a/backend/pkg/api/services/service_email_sender.go b/backend/pkg/api/services/service_email_sender.go index 4bf0bee69..fa8cf61e2 100644 --- a/backend/pkg/api/services/service_email_sender.go +++ b/backend/pkg/api/services/service_email_sender.go @@ -1,6 +1,7 @@ package services import ( + "sync" "time" "github.com/gobitfly/beaconchain/pkg/commons/log" @@ -29,7 +30,8 @@ var Queue []QueuedEmail // collects & queues mails, sends in batches regularly (possibly aggregating multiple 
messages to the same user to avoid spam?) // TODO ratelimiting // TODO send via SMTP/mailgun/others? -func (s *Services) startEmailSenderService() { +func (s *Services) startEmailSenderService(wg *sync.WaitGroup) { + o := sync.Once{} for { startTime := time.Now() // lock mutex @@ -44,6 +46,9 @@ }*/ } log.Infof("=== message sending done in %s", time.Since(startTime)) + o.Do(func() { + wg.Done() + }) utils.ConstantTimeDelay(startTime, 30*time.Second) } } diff --git a/backend/pkg/api/services/service_slot_viz.go b/backend/pkg/api/services/service_slot_viz.go index e26145445..9b7af5031 100644 --- a/backend/pkg/api/services/service_slot_viz.go +++ b/backend/pkg/api/services/service_slot_viz.go @@ -27,7 +27,8 @@ import ( var currentDutiesInfo atomic.Pointer[SyncData] -func (s *Services) startSlotVizDataService() { +func (s *Services) startSlotVizDataService(wg *sync.WaitGroup) { + o := sync.Once{} for { startTime := time.Now() delay := time.Duration(utils.Config.Chain.ClConfig.SecondsPerSlot) * time.Second @@ -40,6 +41,9 @@ } log.Infof("=== slotviz data updated in %s", time.Since(startTime)) r(constants.Success, map[string]string{"took": time.Since(startTime).String()}) + o.Do(func() { + wg.Done() + }) utils.ConstantTimeDelay(startTime, delay) } } diff --git a/backend/pkg/api/services/service_validator_mapping.go b/backend/pkg/api/services/service_validator_mapping.go index 374c6906a..187c41a14 100644 --- a/backend/pkg/api/services/service_validator_mapping.go +++ b/backend/pkg/api/services/service_validator_mapping.go @@ -5,6 +5,7 @@ import ( "context" "encoding/gob" "fmt" + "sync" "sync/atomic" "time" @@ -33,8 +34,9 @@ var _cachedRedisValidatorMapping = new(types.RedisCachedValidatorsMapping) var lastEpochUpdate = uint64(0) -func (s *Services) startIndexMappingService() { +func (s *Services) startIndexMappingService(wg *sync.WaitGroup) { var err error + o := sync.Once{} for
{ startTime := time.Now() delay := time.Duration(utils.Config.Chain.ClConfig.SecondsPerSlot) * time.Second @@ -53,6 +55,9 @@ func (s *Services) startIndexMappingService() { log.Infof("=== validator mapping updated in %s", time.Since(startTime)) r(constants.Success, map[string]string{"took": time.Since(startTime).String(), "latest_epoch": fmt.Sprintf("%d", lastEpochUpdate)}) lastEpochUpdate = latestEpoch + o.Do(func() { + wg.Done() + }) } utils.ConstantTimeDelay(startTime, delay) } From 662e66483482d920baadced97fe0eba51ec5dfa2 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:06:39 +0200 Subject: [PATCH 044/187] (BEDS-407) login: don't reveal email doesn't exist (#818) --- backend/pkg/api/handlers/auth.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/backend/pkg/api/handlers/auth.go b/backend/pkg/api/handlers/auth.go index 1bd6e3bd7..62e55e23c 100644 --- a/backend/pkg/api/handlers/auth.go +++ b/backend/pkg/api/handlers/auth.go @@ -443,6 +443,9 @@ func (h *HandlerService) InternalPostLogin(w http.ResponseWriter, r *http.Reques // fetch user userId, err := h.dai.GetUserByEmail(r.Context(), email) if err != nil { + if errors.Is(err, dataaccess.ErrNotFound) { + err = errBadCredentials + } handleErr(w, r, err) return } From f7842cac400cb2b58c76bf816318c0b13f04d7eb Mon Sep 17 00:00:00 2001 From: RamiRond Date: Tue, 3 Sep 2024 14:38:49 +0200 Subject: [PATCH 045/187] CR comments --- backend/pkg/api/handlers/internal.go | 2 +- backend/pkg/api/handlers/public.go | 2 +- backend/pkg/api/router.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 89fbd85da..37c35491a 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -831,7 +831,7 @@ func (h *HandlerService) InternalDeleteValidatorDashboardValidators(w http.Respo handleErr(w, r, err) return } - indices, publicKeys = 
v.checkValidators(req.Validators, allowEmpty) + indices, publicKeys = v.checkValidators(req.Validators, false) if v.hasErrors() { handleErr(w, r, v) return diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 39c0cf719..4c2e1152a 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -277,7 +277,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons handleErr(w, r, err) return } - indices, publicKeys = v.checkValidators(req.Validators, allowEmpty) + indices, publicKeys = v.checkValidators(req.Validators, false) if v.hasErrors() { handleErr(w, r, v) return diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index e3ce14420..a21d346ae 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -271,7 +271,7 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte {http.MethodDelete, "/{dashboard_id}/groups/{group_id}", hs.PublicDeleteValidatorDashboardGroup, hs.InternalDeleteValidatorDashboardGroup}, {http.MethodPost, "/{dashboard_id}/validators", hs.PublicPostValidatorDashboardValidators, hs.InternalPostValidatorDashboardValidators}, {http.MethodGet, "/{dashboard_id}/validators", hs.PublicGetValidatorDashboardValidators, hs.InternalGetValidatorDashboardValidators}, - {http.MethodDelete, "/{dashboard_id}/validators", hs.PublicDeleteValidatorDashboardValidators, hs.InternalDeleteValidatorDashboardValidators}, + {http.MethodPost, "/{dashboard_id}/validators/bulk-deletions", hs.PublicDeleteValidatorDashboardValidators, hs.InternalDeleteValidatorDashboardValidators}, {http.MethodPost, "/{dashboard_id}/public-ids", hs.PublicPostValidatorDashboardPublicIds, hs.InternalPostValidatorDashboardPublicIds}, {http.MethodPut, "/{dashboard_id}/public-ids/{public_id}", hs.PublicPutValidatorDashboardPublicId, hs.InternalPutValidatorDashboardPublicId}, {http.MethodDelete, "/{dashboard_id}/public-ids/{public_id}", 
hs.PublicDeleteValidatorDashboardPublicId, hs.InternalDeleteValidatorDashboardPublicId}, From 9974bb7080b35681560a7fed222bf2a9235a4752 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 14:42:16 +0200 Subject: [PATCH 046/187] (BEDS-409) deposit endpoints: remove search param from (#819) --- backend/pkg/api/data_access/dummy.go | 4 ++-- backend/pkg/api/data_access/vdb.go | 4 ++-- backend/pkg/api/data_access/vdb_deposits.go | 4 ++-- backend/pkg/api/handlers/public.go | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 6f538dc95..4cb7f7bdf 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -346,11 +346,11 @@ func (d *DummyService) GetValidatorDashboardGroupHeatmap(ctx context.Context, da return getDummyStruct[t.VDBHeatmapTooltipData]() } -func (d *DummyService) GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) { +func (d *DummyService) GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) { return getDummyWithPaging[t.VDBExecutionDepositsTableRow]() } -func (d *DummyService) GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) { +func (d *DummyService) GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) { return getDummyWithPaging[t.VDBConsensusDepositsTableRow]() } diff --git a/backend/pkg/api/data_access/vdb.go b/backend/pkg/api/data_access/vdb.go index d922127be..49d710873 100644 --- a/backend/pkg/api/data_access/vdb.go +++ 
b/backend/pkg/api/data_access/vdb.go @@ -67,8 +67,8 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardHeatmap(ctx context.Context, dashboardId t.VDBId, protocolModes t.VDBProtocolModes, aggregation enums.ChartAggregation, afterTs uint64, beforeTs uint64) (*t.VDBHeatmap, error) GetValidatorDashboardGroupHeatmap(ctx context.Context, dashboardId t.VDBId, groupId uint64, protocolModes t.VDBProtocolModes, aggregation enums.ChartAggregation, timestamp uint64) (*t.VDBHeatmapTooltipData, error) - GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) - GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) + GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) + GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) GetValidatorDashboardTotalElDeposits(ctx context.Context, dashboardId t.VDBId) (*t.VDBTotalExecutionDepositsData, error) GetValidatorDashboardTotalClDeposits(ctx context.Context, dashboardId t.VDBId) (*t.VDBTotalConsensusDepositsData, error) diff --git a/backend/pkg/api/data_access/vdb_deposits.go b/backend/pkg/api/data_access/vdb_deposits.go index c8a4d5b14..7d32cc002 100644 --- a/backend/pkg/api/data_access/vdb_deposits.go +++ b/backend/pkg/api/data_access/vdb_deposits.go @@ -19,7 +19,7 @@ import ( "github.com/shopspring/decimal" ) -func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) { +func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, 
dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBExecutionDepositsTableRow, *t.Paging, error) { var err error currentDirection := enums.DESC // TODO: expose over parameter var currentCursor t.ELDepositsCursor @@ -177,7 +177,7 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, return responseData, p, nil } -func (d *DataAccessService) GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, search string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) { +func (d *DataAccessService) GetValidatorDashboardClDeposits(ctx context.Context, dashboardId t.VDBId, cursor string, limit uint64) ([]t.VDBConsensusDepositsTableRow, *t.Paging, error) { var err error currentDirection := enums.DESC // TODO: expose over parameter var currentCursor t.CLDepositsCursor diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 28d17bb39..caf69c31f 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -1149,7 +1149,7 @@ func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.search, pagingParams.limit) + data, paging, err := h.dai.GetValidatorDashboardElDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return @@ -1174,7 +1174,7 @@ func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w htt return } - data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.search, pagingParams.limit) + data, paging, err := h.dai.GetValidatorDashboardClDeposits(r.Context(), *dashboardId, pagingParams.cursor, pagingParams.limit) if err != nil { handleErr(w, r, err) return From dd3e084d52f7f08cc5596415b4e825f31b0d0e01 Mon Sep 17 00:00:00 2001 From: 
peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 13:20:12 +0000 Subject: [PATCH 047/187] feat(api): improve testing scaffolding --- backend/go.mod | 4 +- backend/go.sum | 4 + backend/pkg/api/auth.go | 1 + backend/pkg/api/handlers/handlers_test.go | 133 ++++++++++++++++++---- 4 files changed, 117 insertions(+), 25 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index 3b622725a..8e2ae391c 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -21,6 +21,7 @@ require ( github.com/donovanhide/eventsource v0.0.0-20210830082556-c59027999da0 github.com/doug-martin/goqu/v9 v9.19.0 github.com/ethereum/go-ethereum v1.13.12 + github.com/fergusstrange/embedded-postgres v1.29.0 github.com/go-faker/faker/v4 v4.3.0 github.com/go-redis/redis/v8 v8.11.5 github.com/gobitfly/eth-rewards v0.1.2-0.20230403064929-411ddc40a5f7 @@ -69,6 +70,7 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/crypto v0.19.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a + golang.org/x/net v0.21.0 golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 @@ -222,6 +224,7 @@ require ( github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect @@ -233,7 +236,6 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/backend/go.sum b/backend/go.sum index f16d0bc61..f3e0980f2 100644 --- 
a/backend/go.sum +++ b/backend/go.sum @@ -237,6 +237,8 @@ github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4 github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fergusstrange/embedded-postgres v1.29.0 h1:Uv8hdhoiaNMuH0w8UuGXDHr60VoAQPFdgx7Qf3bzXJM= +github.com/fergusstrange/embedded-postgres v1.29.0/go.mod h1:t/MLs0h9ukYM6FSt99R7InCHs1nW0ordoVCcnzmpTYw= github.com/ferranbt/fastssz v0.1.3 h1:ZI+z3JH05h4kgmFXdHuR1aWYsgrg7o+Fw7/NCzM16Mo= github.com/ferranbt/fastssz v0.1.3/go.mod h1:0Y9TEd/9XuFlh7mskMPfXiI2Dkw4Ddg9EyXt1W7MRvE= github.com/fjl/memsize v0.0.2 h1:27txuSD9or+NZlnOWdKUxeBzTAUkWCVh+4Gf2dWFOzA= @@ -915,6 +917,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/ydb-platform/ydb-go-genproto v0.0.0-20240126124512-dbb0e1720dbf h1:ckwNHVo4bv2tqNkgx3W3HANh3ta1j6TR5qw08J1A7Tw= diff --git a/backend/pkg/api/auth.go b/backend/pkg/api/auth.go index dc8a62814..048c90865 100644 --- a/backend/pkg/api/auth.go +++ b/backend/pkg/api/auth.go @@ -35,6 +35,7 
@@ func newSessionManager(cfg *types.Config) *scs.SessionManager { secure = true } scs.Cookie.Secure = secure + log.Info("Session cookie secure:", secure) scs.Cookie.SameSite = sameSite scs.Store = redisstore.New(pool) diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go index 6e942d3af..84f826ec3 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/handlers/handlers_test.go @@ -9,8 +9,11 @@ import ( "net/http/cookiejar" "net/http/httptest" "os" + "os/exec" "testing" + "time" + embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/gobitfly/beaconchain/pkg/api" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" api_types "github.com/gobitfly/beaconchain/pkg/api/types" @@ -18,11 +21,15 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/commons/version" + "github.com/jmoiron/sqlx" + "github.com/pressly/goose/v3" "github.com/stretchr/testify/assert" + "golang.org/x/crypto/bcrypt" ) var ts *testServer var dataAccessor dataaccess.DataAccessor +var postgres *embeddedpostgres.EmbeddedPostgres type testServer struct { *httptest.Server @@ -69,17 +76,19 @@ func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.Api func TestMain(m *testing.M) { setup() - defer teardown() + code := m.Run() + teardown() // wait till service initialization is completed (TODO: find a better way to do this) // time.Sleep(30 * time.Second) - os.Exit(m.Run()) + os.Exit(code) } func teardown() { dataAccessor.Close() ts.Close() + postgres.Stop() } func setup() { @@ -87,11 +96,50 @@ func setup() { flag.Parse() + _ = exec.Command("pkill", "-9", "postgres").Run() + postgres = embeddedpostgres.NewDatabase(embeddedpostgres.DefaultConfig().Username("postgres")) + err := postgres.Start() + if err != nil { + log.Fatal(err, "error starting embedded postgres", 0) + } + + tempDb, err := 
sqlx.Connect("postgres", "host=localhost port=5432 user=postgres password=postgres dbname=postgres sslmode=disable") + if err != nil { + log.Fatal(err, "error connection to test db", 0) + } + + if err := goose.Up(tempDb.DB, "../../../pkg/commons/db/migrations/postgres"); err != nil { + log.Fatal(err, "error running migrations", 0) + } + + // insert dummy user for testing (email: admin@admin, password: admin) + pHash, _ := bcrypt.GenerateFromPassword([]byte("admin"), 10) + _, err = tempDb.Exec(` + INSERT INTO users (password, email, register_ts, api_key, email_confirmed) + VALUES ($1, $2, TO_TIMESTAMP($3), $4, $5)`, + string(pHash), "admin@admin.com", time.Now().Unix(), "admin", true, + ) + if err != nil { + log.Fatal(err, "error inserting user", 0) + } + cfg := &types.Config{} - err := utils.ReadConfig(cfg, *configPath) + err = utils.ReadConfig(cfg, *configPath) if err != nil { log.Fatal(err, "error reading config file", 0) } + cfg.Frontend.ReaderDatabase.Host = "localhost" + cfg.Frontend.ReaderDatabase.Port = "5432" + cfg.Frontend.ReaderDatabase.Name = "postgres" + cfg.Frontend.ReaderDatabase.Password = "postgres" + cfg.Frontend.ReaderDatabase.Username = "postgres" + + cfg.Frontend.WriterDatabase.Host = "localhost" + cfg.Frontend.WriterDatabase.Port = "5432" + cfg.Frontend.WriterDatabase.Name = "postgres" + cfg.Frontend.WriterDatabase.Password = "postgres" + cfg.Frontend.WriterDatabase.Username = "postgres" + utils.Config = cfg log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": utils.Config.Chain.ClConfig.ConfigName}, "starting") @@ -100,8 +148,11 @@ func setup() { router := api.NewApiRouter(dataAccessor, cfg) ts = &testServer{httptest.NewTLSServer(router)} - - jar, _ := cookiejar.New(nil) + log.Info(ts.URL) + jar, err := cookiejar.New(nil) + if err != nil { + log.Fatal(err, "error creating cookie jar", 0) + } ts.Server.Client().Jar = jar } @@ -124,7 +175,7 @@ func 
TestInternalGetProductSummaryHandler(t *testing.T) { } func TestInternalGetLatestStateHandler(t *testing.T) { - code, _, body := ts.get(t, "//api/i/latest-state") + code, _, body := ts.get(t, "/api/i/latest-state") assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetLatestStateResponse{} @@ -136,20 +187,6 @@ func TestInternalGetLatestStateHandler(t *testing.T) { assert.NotEqual(t, uint64(0), respData.Data.FinalizedEpoch, "finalized epoch should not be 0") } -func TestInternalPostAdConfigurationsHandler(t *testing.T) { - code, _, body := ts.get(t, "/api/i/ad-configurations") - assert.Equal(t, http.StatusUnauthorized, code) - - resp := ts.parseErrorResonse(t, body) - assert.Equal(t, "unauthorized: not authenticated", resp.Error) - - // login - code, _, body = ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin@admin.com", "password": "admin"}`))) - assert.Equal(t, http.StatusNotFound, code) - resp = ts.parseErrorResonse(t, body) - assert.Equal(t, "not found: user not found", resp.Error) -} - func TestInternalLoginHandler(t *testing.T) { // login with email in wrong format code, _, body := ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) @@ -157,11 +194,33 @@ func TestInternalLoginHandler(t *testing.T) { resp := ts.parseErrorResonse(t, body) assert.Equal(t, "email: given value 'admin' has incorrect format", resp.Error, "unexpected error message") - // login with wrong user - code, _, body = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) - assert.Equal(t, http.StatusNotFound, code) + // login with correct user and wrong password + code, _, body = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "wrong"}`)) + assert.Equal(t, http.StatusUnauthorized, code, "login should not be successful") resp = ts.parseErrorResonse(t, body) - assert.Equal(t, "not found: user not found", resp.Error, "unexpected 
error message") // TODO: this should not return the same error as a user with a wrong password + assert.Equal(t, "unauthorized: invalid email or password", resp.Error, "unexpected error message") + + // login with correct user and password + code, _, _ = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) + assert.Equal(t, http.StatusOK, code, "login should be successful") + + // check if user is logged in and has a valid session + code, _, body = ts.get(t, "/api/i/users/me") + assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") + meResponse := &api_types.InternalGetUserInfoResponse{} + if err := json.Unmarshal([]byte(body), meResponse); err != nil { + t.Fatal(err) + } + // check if email is censored + assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") + + // check if logout works + code, _, _ = ts.post(t, "/api/i/logout", bytes.NewBufferString(``)) + assert.Equal(t, http.StatusOK, code, "logout should be successful") + + // check if user is logged out + code, _, _ = ts.get(t, "/api/i/users/me") + assert.Equal(t, http.StatusUnauthorized, code, "call to users/me should be unauthorized") } func TestInternalSearchHandler(t *testing.T) { @@ -177,4 +236,30 @@ func TestInternalSearchHandler(t *testing.T) { assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") + + // search for validator by pubkey + code, _, body = ts.post(t, "/api/i/search", 
bytes.NewBufferString(`{"input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + assert.Equal(t, 200, code) + + resp = api_types.InternalPostSearchResponse{} + if err := json.Unmarshal([]byte(body), &resp); err != nil { + t.Fatal(err) + } + + assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") + assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") + assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") + + // search for validator by withdrawal address + code, _, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + assert.Equal(t, 200, code) + + resp = api_types.InternalPostSearchResponse{} + if err := json.Unmarshal([]byte(body), &resp); err != nil { + t.Fatal(err) + } + + assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") + assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") + assert.Greater(t, *resp.Data[0].NumValue, uint64(0), "returned number of validators should be greater than 0") } From e481b07adc4914170e762faf5c352b8260d18926 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 13:24:53 +0000 Subject: [PATCH 048/187] feat(api): simplify json parsing in tests --- backend/pkg/api/handlers/handlers_test.go | 38
+++++ 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go index 84f826ec3..10249c6a9 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/handlers/handlers_test.go @@ -96,13 +96,17 @@ func setup() { flag.Parse() + // terminate any currently running postgres instances _ = exec.Command("pkill", "-9", "postgres").Run() + + // start embedded postgres postgres = embeddedpostgres.NewDatabase(embeddedpostgres.DefaultConfig().Username("postgres")) err := postgres.Start() if err != nil { log.Fatal(err, "error starting embedded postgres", 0) } + // connect to the embedded db and run migrations tempDb, err := sqlx.Connect("postgres", "host=localhost port=5432 user=postgres password=postgres dbname=postgres sslmode=disable") if err != nil { log.Fatal(err, "error connection to test db", 0) @@ -128,6 +132,8 @@ if err != nil { log.Fatal(err, "error reading config file", 0) } + + // hardcode db connection details for testing cfg.Frontend.ReaderDatabase.Host = "localhost" cfg.Frontend.ReaderDatabase.Port = "5432" cfg.Frontend.ReaderDatabase.Name = "postgres" @@ -148,7 +154,6 @@ router := api.NewApiRouter(dataAccessor, cfg) ts = &testServer{httptest.NewTLSServer(router)} - log.Info(ts.URL) jar, err := cookiejar.New(nil) if err != nil { log.Fatal(err, "error creating cookie jar", 0) @@ -163,10 +168,7 @@ func TestInternalGetProductSummaryHandler(t *testing.T) { respData := api_types.InternalGetProductSummaryResponse{} err := json.Unmarshal([]byte(body), &respData) - if err != nil { - log.Infof("%s", body) - t.Fatal(err) - } + assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, respData.Data.ValidatorsPerDashboardLimit, "ValidatorsPerDashboardLimit should not be 0") assert.NotEqual(t, 0, len(respData.Data.ApiProducts), "ApiProducts should not be empty") @@ -179,9 +181,8 @@ func
TestInternalGetLatestStateHandler(t *testing.T) { assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetLatestStateResponse{} - if err := json.Unmarshal([]byte(body), &respData); err != nil { - t.Fatal(err) - } + err := json.Unmarshal([]byte(body), &respData) + assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, uint64(0), respData.Data.LatestSlot, "latest slot should not be 0") assert.NotEqual(t, uint64(0), respData.Data.FinalizedEpoch, "finalized epoch should not be 0") @@ -208,9 +209,9 @@ func TestInternalLoginHandler(t *testing.T) { code, _, body = ts.get(t, "/api/i/users/me") assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") meResponse := &api_types.InternalGetUserInfoResponse{} - if err := json.Unmarshal([]byte(body), meResponse); err != nil { - t.Fatal(err) - } + err := json.Unmarshal([]byte(body), meResponse) + assert.Nil(t, err, "error unmarshalling response") + // check if email is censored assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") @@ -229,9 +230,8 @@ func TestInternalSearchHandler(t *testing.T) { assert.Equal(t, 200, code) resp := api_types.InternalPostSearchResponse{} - if err := json.Unmarshal([]byte(body), &resp); err != nil { - t.Fatal(err) - } + err := json.Unmarshal([]byte(body), &resp) + assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") @@ -242,9 +242,8 @@ func TestInternalSearchHandler(t *testing.T) { assert.Equal(t, 200, code) resp = api_types.InternalPostSearchResponse{} - if err := json.Unmarshal([]byte(body), &resp); err != nil { - t.Fatal(err) - } + err = json.Unmarshal([]byte(body), &resp) + assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should 
not be nil") @@ -255,9 +254,8 @@ func TestInternalSearchHandler(t *testing.T) { assert.Equal(t, 200, code) resp = api_types.InternalPostSearchResponse{} - if err := json.Unmarshal([]byte(body), &resp); err != nil { - t.Fatal(err) - } + err = json.Unmarshal([]byte(body), &resp) + assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") From 6d07c0c04c2c5d2bad1c651991d0fe76f50708bd Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 13:26:49 +0000 Subject: [PATCH 049/187] chore(api): improve test formatting --- backend/pkg/api/handlers/handlers_test.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go index 10249c6a9..bfa37d5ce 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/handlers/handlers_test.go @@ -169,7 +169,6 @@ func TestInternalGetProductSummaryHandler(t *testing.T) { respData := api_types.InternalGetProductSummaryResponse{} err := json.Unmarshal([]byte(body), &respData) assert.Nil(t, err, "error unmarshalling response") - assert.NotEqual(t, 0, respData.Data.ValidatorsPerDashboardLimit, "ValidatorsPerDashboardLimit should not be 0") assert.NotEqual(t, 0, len(respData.Data.ApiProducts), "ApiProducts should not be empty") assert.NotEqual(t, 0, len(respData.Data.ExtraDashboardValidatorsPremiumAddon), "ExtraDashboardValidatorsPremiumAddon should not be empty") @@ -183,7 +182,6 @@ func TestInternalGetLatestStateHandler(t *testing.T) { respData := api_types.InternalGetLatestStateResponse{} err := json.Unmarshal([]byte(body), &respData) assert.Nil(t, err, "error unmarshalling response") - assert.NotEqual(t, uint64(0), respData.Data.LatestSlot, "latest slot should not be 0") assert.NotEqual(t, uint64(0), respData.Data.FinalizedEpoch, 
"finalized epoch should not be 0") } @@ -208,10 +206,10 @@ func TestInternalLoginHandler(t *testing.T) { // check if user is logged in and has a valid session code, _, body = ts.get(t, "/api/i/users/me") assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") + meResponse := &api_types.InternalGetUserInfoResponse{} err := json.Unmarshal([]byte(body), meResponse) assert.Nil(t, err, "error unmarshalling response") - // check if email is censored assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") @@ -232,7 +230,6 @@ func TestInternalSearchHandler(t *testing.T) { resp := api_types.InternalPostSearchResponse{} err := json.Unmarshal([]byte(body), &resp) assert.Nil(t, err, "error unmarshalling response") - assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") @@ -244,7 +241,6 @@ func TestInternalSearchHandler(t *testing.T) { resp = api_types.InternalPostSearchResponse{} err = json.Unmarshal([]byte(body), &resp) assert.Nil(t, err, "error unmarshalling response") - assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") @@ -256,7 +252,6 @@ func TestInternalSearchHandler(t *testing.T) { resp = api_types.InternalPostSearchResponse{} err = json.Unmarshal([]byte(body), &resp) assert.Nil(t, err, "error unmarshalling response") - assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Greater(t, *resp.Data[0].NumValue, uint64(0), "returned number of validators should be greater than 0") From c88ec7e06fc9a00e9004a8ecc4d8bb5c372d17a6 Mon Sep 17 00:00:00 2001 From: 
benji-bitfly Date: Tue, 3 Sep 2024 15:28:17 +0200 Subject: [PATCH 050/187] feat:Set HTML attributes to prevent popups from third-party apps. BEDS-368 --- frontend/components/bc/searchbar/SearchbarMain.vue | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/components/bc/searchbar/SearchbarMain.vue b/frontend/components/bc/searchbar/SearchbarMain.vue index d19c6292b..9e0c7240f 100644 --- a/frontend/components/bc/searchbar/SearchbarMain.vue +++ b/frontend/components/bc/searchbar/SearchbarMain.vue @@ -847,6 +847,7 @@ function informationIfHiddenResults(): string { :placeholder="t(SearchbarPurposeInfo[barPurpose].placeHolder)" @keyup="(e) => handleKeyPressInTextField(e.key)" @focus="globalState.showDropdown = true" + autocomplete="off" > Date: Tue, 3 Sep 2024 15:41:24 +0200 Subject: [PATCH 051/187] Fix lint comment. --- frontend/components/bc/searchbar/SearchbarMain.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/components/bc/searchbar/SearchbarMain.vue b/frontend/components/bc/searchbar/SearchbarMain.vue index 9e0c7240f..587c513df 100644 --- a/frontend/components/bc/searchbar/SearchbarMain.vue +++ b/frontend/components/bc/searchbar/SearchbarMain.vue @@ -845,9 +845,9 @@ function informationIfHiddenResults(): string { :class="[barShape, colorTheme]" type="text" :placeholder="t(SearchbarPurposeInfo[barPurpose].placeHolder)" + autocomplete="off" @keyup="(e) => handleKeyPressInTextField(e.key)" @focus="globalState.showDropdown = true" - autocomplete="off" > Date: Tue, 3 Sep 2024 13:46:39 +0000 Subject: [PATCH 052/187] chore(api): remove debug logging --- backend/pkg/api/auth.go | 1 - backend/pkg/api/handlers/handlers_test.go | 36 ++++++++++++++++++++--- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/backend/pkg/api/auth.go b/backend/pkg/api/auth.go index 048c90865..dc8a62814 100644 --- a/backend/pkg/api/auth.go +++ b/backend/pkg/api/auth.go @@ -35,7 +35,6 @@ func newSessionManager(cfg *types.Config) *scs.SessionManager 
{ secure = true } scs.Cookie.Secure = secure - log.Info("Session cookie secure:", secure) scs.Cookie.SameSite = sameSite scs.Store = redisstore.New(pool) diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go index bfa37d5ce..c11c9031a 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/handlers/handlers_test.go @@ -163,7 +163,6 @@ func setup() { func TestInternalGetProductSummaryHandler(t *testing.T) { code, _, body := ts.get(t, "/api/i/product-summary") - assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetProductSummaryResponse{} @@ -225,7 +224,7 @@ func TestInternalLoginHandler(t *testing.T) { func TestInternalSearchHandler(t *testing.T) { // search for validator with index 5 code, _, body := ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"5","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) - assert.Equal(t, 200, code) + assert.Equal(t, http.StatusOK, code) resp := api_types.InternalPostSearchResponse{} err := json.Unmarshal([]byte(body), &resp) @@ -236,7 +235,7 @@ func TestInternalSearchHandler(t *testing.T) { // search for validator by pubkey code, _, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) - assert.Equal(t, 200, code) + assert.Equal(t, http.StatusOK, code) resp = api_types.InternalPostSearchResponse{} err = json.Unmarshal([]byte(body), 
&resp) @@ -247,7 +246,7 @@ func TestInternalSearchHandler(t *testing.T) { // search for validator by withdawal address code, _, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) - assert.Equal(t, 200, code) + assert.Equal(t, http.StatusOK, code) resp = api_types.InternalPostSearchResponse{} err = json.Unmarshal([]byte(body), &resp) @@ -256,3 +255,32 @@ func TestInternalSearchHandler(t *testing.T) { assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Greater(t, *resp.Data[0].NumValue, uint64(0), "returned number of validators should be greater than 0") } + +func TestSlotVizHandler(t *testing.T) { + code, _, body := ts.get(t, "/api/i/validator-dashboards/NQ/slot-viz") + assert.Equal(t, http.StatusOK, code) + + resp := api_types.GetValidatorDashboardSlotVizResponse{} + err := json.Unmarshal([]byte(body), &resp) + assert.Nil(t, err, "error unmarshalling response") + assert.Equal(t, 4, len(resp.Data), "response data should contain the last 4 epochs") + + headStateCount := 0 + for _, epoch := range resp.Data { + + if epoch.State == "head" { // count the amount of head epochs returned, should be exactly 1 + headStateCount++ + } + attestationAssignments := 0 + assert.Equal(t, 32, len(epoch.Slots), "each epoch should contain 32 slots") + + for _, slot := range epoch.Slots { + if slot.Attestations != nil { // count the amount of attestation assignments for each epoch, should be exactly 1 + attestationAssignments++ + } + } + + assert.Equal(t, attestationAssignments, 1, "epoch should have exactly one attestation assignment") + } + assert.Equal(t, 1, headStateCount, "one of the last 4 epochs should be in head 
state") +} From 5c715ed14e8f1c8de2d8499af2b4764364cd1bd0 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 3 Sep 2024 15:54:02 +0200 Subject: [PATCH 053/187] chore(api): please linter --- backend/pkg/api/handlers/handlers_test.go | 38 ++++++++++++----------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/handlers/handlers_test.go index c11c9031a..42e928257 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/handlers/handlers_test.go @@ -38,7 +38,7 @@ type testServer struct { // Implement a get() method on our custom testServer type. This makes a GET // request to a given url path using the test server client, and returns the // response status code, headers and body. -func (ts *testServer) get(t *testing.T, urlPath string) (int, http.Header, string) { +func (ts *testServer) get(t *testing.T, urlPath string) (int, string) { rs, err := ts.Client().Get(ts.URL + urlPath) if err != nil { t.Fatal(err) @@ -49,10 +49,10 @@ func (ts *testServer) get(t *testing.T, urlPath string) (int, http.Header, strin t.Fatal(err) } bytes.TrimSpace(body) - return rs.StatusCode, rs.Header, string(body) + return rs.StatusCode, string(body) } -func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, http.Header, string) { +func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, string) { rs, err := ts.Client().Post(ts.URL+urlPath, "application/json", data) if err != nil { t.Fatal(err) @@ -63,7 +63,7 @@ func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, h t.Fatal(err) } bytes.TrimSpace(body) - return rs.StatusCode, rs.Header, string(body) + return rs.StatusCode, string(body) } func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.ApiErrorResponse { @@ -88,7 +88,10 @@ func TestMain(m *testing.M) { func teardown() { dataAccessor.Close() ts.Close() - 
postgres.Stop() + err := postgres.Stop() + if err != nil { + log.Error(err, "error stopping embedded postgres", 0) + } } func setup() { @@ -162,7 +165,7 @@ func setup() { } func TestInternalGetProductSummaryHandler(t *testing.T) { - code, _, body := ts.get(t, "/api/i/product-summary") + code, body := ts.get(t, "/api/i/product-summary") assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetProductSummaryResponse{} @@ -175,7 +178,7 @@ func TestInternalGetProductSummaryHandler(t *testing.T) { } func TestInternalGetLatestStateHandler(t *testing.T) { - code, _, body := ts.get(t, "/api/i/latest-state") + code, body := ts.get(t, "/api/i/latest-state") assert.Equal(t, http.StatusOK, code) respData := api_types.InternalGetLatestStateResponse{} @@ -187,23 +190,23 @@ func TestInternalGetLatestStateHandler(t *testing.T) { func TestInternalLoginHandler(t *testing.T) { // login with email in wrong format - code, _, body := ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) + code, body := ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) assert.Equal(t, http.StatusBadRequest, code) resp := ts.parseErrorResonse(t, body) assert.Equal(t, "email: given value 'admin' has incorrect format", resp.Error, "unexpected error message") // login with correct user and wrong password - code, _, body = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "wrong"}`)) + code, body = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "wrong"}`)) assert.Equal(t, http.StatusUnauthorized, code, "login should not be successful") resp = ts.parseErrorResonse(t, body) assert.Equal(t, "unauthorized: invalid email or password", resp.Error, "unexpected error message") // login with correct user and password - code, _, _ = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) + 
code, _ = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) assert.Equal(t, http.StatusOK, code, "login should be successful") // check if user is logged in and has a valid session - code, _, body = ts.get(t, "/api/i/users/me") + code, body = ts.get(t, "/api/i/users/me") assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") meResponse := &api_types.InternalGetUserInfoResponse{} @@ -213,17 +216,17 @@ func TestInternalLoginHandler(t *testing.T) { assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") // check if logout works - code, _, _ = ts.post(t, "/api/i/logout", bytes.NewBufferString(``)) + code, _ = ts.post(t, "/api/i/logout", bytes.NewBufferString(``)) assert.Equal(t, http.StatusOK, code, "logout should be successful") // check if user is logged out - code, _, _ = ts.get(t, "/api/i/users/me") + code, _ = ts.get(t, "/api/i/users/me") assert.Equal(t, http.StatusUnauthorized, code, "call to users/me should be unauthorized") } func TestInternalSearchHandler(t *testing.T) { // search for validator with index 5 - code, _, body := ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"5","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + code, body := ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"5","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) assert.Equal(t, http.StatusOK, code) resp := api_types.InternalPostSearchResponse{} @@ -234,7 +237,7 @@ func TestInternalSearchHandler(t 
*testing.T) { assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") // search for validator by pubkey - code, _, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) assert.Equal(t, http.StatusOK, code) resp = api_types.InternalPostSearchResponse{} @@ -245,7 +248,7 @@ func TestInternalSearchHandler(t *testing.T) { assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") // search for validator by withdawal address - code, _, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + code, body = ts.post(t, "/api/i/search", 
bytes.NewBufferString(`{"input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) assert.Equal(t, http.StatusOK, code) resp = api_types.InternalPostSearchResponse{} @@ -257,7 +260,7 @@ func TestInternalSearchHandler(t *testing.T) { } func TestSlotVizHandler(t *testing.T) { - code, _, body := ts.get(t, "/api/i/validator-dashboards/NQ/slot-viz") + code, body := ts.get(t, "/api/i/validator-dashboards/NQ/slot-viz") assert.Equal(t, http.StatusOK, code) resp := api_types.GetValidatorDashboardSlotVizResponse{} @@ -267,7 +270,6 @@ func TestSlotVizHandler(t *testing.T) { headStateCount := 0 for _, epoch := range resp.Data { - if epoch.State == "head" { // count the amount of head epochs returned, should be exactly 1 headStateCount++ } From 0f8f148fe10c92610490e6a30b5572708020417e Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Wed, 4 Sep 2024 10:29:29 +0200 Subject: [PATCH 054/187] feat: adjust style for displaying the accordion content. 
BEDS-405 --- frontend/assets/css/prime.scss | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/assets/css/prime.scss b/frontend/assets/css/prime.scss index 7d69b0607..ef6e8fa2e 100644 --- a/frontend/assets/css/prime.scss +++ b/frontend/assets/css/prime.scss @@ -597,6 +597,7 @@ div.p-accordion { align-items: center; gap: var(--padding); border: none; + justify-content: flex-start; .p-accordionheader-toggle-icon { display: none; } From 5f98765c33c7679f8f31f51dc7b83e1d393d5fe3 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:08:49 +0200 Subject: [PATCH 055/187] BEDS-306: monitoring: increase db timeout --- backend/pkg/monitoring/services/clickhouse_epoch.go | 2 +- backend/pkg/monitoring/services/clickhouse_rollings.go | 2 +- backend/pkg/monitoring/services/db_connections.go | 2 +- backend/pkg/monitoring/services/timeout_detector.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/pkg/monitoring/services/clickhouse_epoch.go b/backend/pkg/monitoring/services/clickhouse_epoch.go index ea3ee344f..8df5ed8dc 100644 --- a/backend/pkg/monitoring/services/clickhouse_epoch.go +++ b/backend/pkg/monitoring/services/clickhouse_epoch.go @@ -46,7 +46,7 @@ func (s *ServiceClickhouseEpoch) runChecks() { } log.Tracef("checking clickhouse epoch") // context with deadline - ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) + ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second) defer cancel() var t time.Time err := db.ClickHouseReader.GetContext(ctx, &t, "SELECT MAX(epoch_timestamp) FROM validator_dashboard_data_epoch") diff --git a/backend/pkg/monitoring/services/clickhouse_rollings.go b/backend/pkg/monitoring/services/clickhouse_rollings.go index 52fbd9f6b..054fb5738 100644 --- a/backend/pkg/monitoring/services/clickhouse_rollings.go +++ b/backend/pkg/monitoring/services/clickhouse_rollings.go @@ -64,7 +64,7 @@ func (s *ServiceClickhouseRollings) runChecks() { } 
log.Tracef("checking clickhouse rolling %s", rolling) // context with deadline - ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) + ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second) defer cancel() var delta uint64 err := db.ClickHouseReader.GetContext(ctx, &delta, fmt.Sprintf(` diff --git a/backend/pkg/monitoring/services/db_connections.go b/backend/pkg/monitoring/services/db_connections.go index 582618633..e0fe180b8 100644 --- a/backend/pkg/monitoring/services/db_connections.go +++ b/backend/pkg/monitoring/services/db_connections.go @@ -87,7 +87,7 @@ func (s *ServerDbConnections) checkDBConnections() { defer wg.Done() log.Tracef("checking db connection for %s", entry.ID) // context with deadline - ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) + ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second) defer cancel() r := NewStatusReport(entry.ID, constants.Default, 10*time.Second) switch edb := entry.DB.(type) { diff --git a/backend/pkg/monitoring/services/timeout_detector.go b/backend/pkg/monitoring/services/timeout_detector.go index a485c28d0..720bb80dc 100644 --- a/backend/pkg/monitoring/services/timeout_detector.go +++ b/backend/pkg/monitoring/services/timeout_detector.go @@ -97,7 +97,7 @@ func (s *ServiceTimeoutDetector) runChecks() { where status = 'running' and timeouts_at < now() ORDER BY event_id ASC, inserted_at DESC` // context with deadline - ctx, cancel := context.WithTimeout(s.ctx, 5*time.Second) + ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second) defer cancel() var victims []struct { EventID string `db:"event_id"` From 8933f7c8eace93bdbd6eeeaec6b4f3ea6359f8c5 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:41:53 +0000 Subject: [PATCH 056/187] feat(api): add integration test workflow --- .../workflows/backend-integration-test.yml | 44 +++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 .github/workflows/backend-integration-test.yml diff 
--git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml new file mode 100644 index 000000000..12da9f685 --- /dev/null +++ b/.github/workflows/backend-integration-test.yml @@ -0,0 +1,44 @@ +name: Backend-Linter + +on: + push: + paths: + - 'backend/**' + branches: + - main + - staging + - BEDS-401/api_integration_tests + pull_request: + paths: + - 'backend/**' + branches: + - '*' + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: read + pull-requests: read + checks: write + +jobs: + build: + name: lint + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: '1.23' + cache: false + - name: golangci-lint + uses: golangci/golangci-lint-action@v3 + with: + version: v1.60.1 + working-directory: backend + args: --timeout=5m + + + From 89fae485a5ec482db7280a55f6aaf89fdd7e55c9 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:45:02 +0000 Subject: [PATCH 057/187] feat(api): update test workflow --- .github/workflows/backend-integration-test.yml | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 12da9f685..1a40f54b5 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -1,9 +1,9 @@ -name: Backend-Linter +name: Backend-Integration-Test on: push: - paths: - - 'backend/**' + # paths: + # - 'backend/**' branches: - main - staging @@ -25,7 +25,7 @@ permissions: jobs: build: - name: lint + name: integration-test runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 @@ -33,12 +33,8 @@ jobs: with: go-version: '1.23' cache: false - - name: golangci-lint - uses: golangci/golangci-lint-action@v3 - with: - version: v1.60.1 - working-directory: backend - args: 
--timeout=5m + - name: Display Go version + run: go version From a5ee230c08b23de6bdc0c97d2ec7ceeebfa06f83 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:47:38 +0000 Subject: [PATCH 058/187] chore(api): update test workflow --- .github/workflows/backend-integration-test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 1a40f54b5..02f893a43 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -33,8 +33,8 @@ jobs: with: go-version: '1.23' cache: false - - name: Display Go version - run: go version + - name: Test with the Go CLI + run: go test -v ./backend/pkg/api/handlers/* From f4d224d5affc96137cb4030f6a0202e09f60aabb Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:49:54 +0000 Subject: [PATCH 059/187] chore(api): update test workflow --- .github/workflows/backend-integration-test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 02f893a43..e6d509251 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -32,8 +32,9 @@ jobs: - uses: actions/setup-go@v4 with: go-version: '1.23' - cache: false + go-version-file: 'backend/go.mod' - name: Test with the Go CLI + working-directory: backend run: go test -v ./backend/pkg/api/handlers/* From ecba3dd3770e165dcf5a0948b26d2f1ec52b0ebb Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:50:40 +0000 Subject: [PATCH 060/187] chore: update test workflow --- .github/workflows/backend-integration-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index e6d509251..41c3428cd 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -35,7 +35,7 @@ jobs: go-version-file: 'backend/go.mod' - name: Test with the Go CLI working-directory: backend - run: go test -v ./backend/pkg/api/handlers/* + run: go test -v ./pkg/api/handlers/* From 6e281708afdcc47465c3c0e72566ef345520f4c5 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:53:49 +0000 Subject: [PATCH 061/187] chore: update test workflow --- .github/workflows/backend-integration-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 41c3428cd..1c894b12c 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -35,7 +35,7 @@ jobs: go-version-file: 'backend/go.mod' - name: Test with the Go CLI working-directory: backend - run: go test -v ./pkg/api/handlers/* + run: go test -v ./pkg/api/handlers/* .config "${{ secrets.CI_CONFIG_PATH }}" From 1c4cc5a885fa4426f13f5f6f8b54b77fd57faff3 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 09:55:23 +0000 Subject: [PATCH 062/187] chore(api): update test workflow --- .github/workflows/backend-integration-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 1c894b12c..501468ce9 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -35,7 +35,7 @@ jobs: go-version-file: 'backend/go.mod' - name: Test with the Go CLI working-directory: backend - run: go test -v ./pkg/api/handlers/* .config "${{ 
secrets.CI_CONFIG_PATH }}" + run: go test -v ./pkg/api/handlers/* -config "${{ secrets.CI_CONFIG_PATH }}" From f5a3cdcfbea44cfb7238843bc06e8c0a41009595 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:07:39 +0000 Subject: [PATCH 063/187] chore: update test workflow --- .github/workflows/backend-integration-test.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 501468ce9..c717a7cb6 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -1,5 +1,4 @@ name: Backend-Integration-Test - on: push: # paths: @@ -26,7 +25,7 @@ permissions: jobs: build: name: integration-test - runs-on: ubuntu-latest + runs-on: self-hosted steps: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 From 8e127a71732336e797bb91e69a0d1fa14cc637d5 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 10:29:56 +0000 Subject: [PATCH 064/187] chore: enable backend integration tests on push to main and staging branches --- .github/workflows/backend-integration-test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index c717a7cb6..a4505308a 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -1,8 +1,8 @@ name: Backend-Integration-Test on: push: - # paths: - # - 'backend/**' + paths: + - 'backend/**' branches: - main - staging From 97a53920fa4cf2ad94f91163a3c846778819247e Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:38:24 +0200 Subject: [PATCH 065/187] refactor: change database connection log level to debug --- backend/pkg/commons/db/db.go | 4 ++-- 1 file changed, 
2 insertions(+), 2 deletions(-) diff --git a/backend/pkg/commons/db/db.go b/backend/pkg/commons/db/db.go index ee046b57d..bc1489514 100644 --- a/backend/pkg/commons/db/db.go +++ b/backend/pkg/commons/db/db.go @@ -117,7 +117,7 @@ func MustInitDB(writer *types.DatabaseConfig, reader *types.DatabaseConfig, driv } } - log.Infof("connecting to %s database %s:%s/%s as writer with %d/%d max open/idle connections", databaseBrand, writer.Host, writer.Port, writer.Name, writer.MaxOpenConns, writer.MaxIdleConns) + log.Debugf("connecting to %s database %s:%s/%s as writer with %d/%d max open/idle connections", databaseBrand, writer.Host, writer.Port, writer.Name, writer.MaxOpenConns, writer.MaxIdleConns) dbConnWriter, err := sqlx.Open(driverName, fmt.Sprintf("%s://%s:%s@%s/%s?%s", databaseBrand, writer.Username, writer.Password, net.JoinHostPort(writer.Host, writer.Port), writer.Name, sslParam)) if err != nil { log.Fatal(err, "error getting Connection Writer database", 0) @@ -147,7 +147,7 @@ func MustInitDB(writer *types.DatabaseConfig, reader *types.DatabaseConfig, driv } } - log.Infof("connecting to %s database %s:%s/%s as reader with %d/%d max open/idle connections", databaseBrand, reader.Host, reader.Port, reader.Name, reader.MaxOpenConns, reader.MaxIdleConns) + log.Debugf("connecting to %s database %s:%s/%s as reader with %d/%d max open/idle connections", databaseBrand, reader.Host, reader.Port, reader.Name, reader.MaxOpenConns, reader.MaxIdleConns) dbConnReader, err := sqlx.Open(driverName, fmt.Sprintf("%s://%s:%s@%s/%s?%s", databaseBrand, reader.Username, reader.Password, net.JoinHostPort(reader.Host, reader.Port), reader.Name, sslParam)) if err != nil { log.Fatal(err, "error getting Connection Reader database", 0) From 603ecc6cc11a316e79721c0532b9480649a12543 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:06:36 +0000 Subject: [PATCH 066/187] chore(api): code review comments --- 
.../workflows/backend-integration-test.yml | 2 +- backend/go.mod | 2 +- .../handlers_test.go => api_test.go} | 153 ++++++++++++------ 3 files changed, 102 insertions(+), 55 deletions(-) rename backend/pkg/api/{handlers/handlers_test.go => api_test.go} (69%) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index a4505308a..4b4d68beb 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -34,7 +34,7 @@ jobs: go-version-file: 'backend/go.mod' - name: Test with the Go CLI working-directory: backend - run: go test -v ./pkg/api/handlers/* -config "${{ secrets.CI_CONFIG_PATH }}" + run: go test ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" diff --git a/backend/go.mod b/backend/go.mod index 8e2ae391c..fe53e1c8e 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -70,7 +70,6 @@ require ( github.com/xeipuuv/gojsonschema v1.2.0 golang.org/x/crypto v0.19.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a - golang.org/x/net v0.21.0 golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 @@ -236,6 +235,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.15.0 // indirect + golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect diff --git a/backend/pkg/api/handlers/handlers_test.go b/backend/pkg/api/api_test.go similarity index 69% rename from backend/pkg/api/handlers/handlers_test.go rename to backend/pkg/api/api_test.go index 42e928257..af4619491 100644 --- a/backend/pkg/api/handlers/handlers_test.go +++ b/backend/pkg/api/api_test.go @@ -1,4 +1,4 @@ -package handlers_test +package api_test import ( "bytes" @@ -35,25 +35,14 @@ type testServer struct { *httptest.Server } -// Implement a get() method on our custom testServer type. 
This makes a GET -// request to a given url path using the test server client, and returns the -// response status code, headers and body. -func (ts *testServer) get(t *testing.T, urlPath string) (int, string) { - rs, err := ts.Client().Get(ts.URL + urlPath) +func (ts *testServer) request(t *testing.T, method, urlPath string, data io.Reader) (int, string) { + req, err := http.NewRequest(method, ts.URL+urlPath, data) if err != nil { t.Fatal(err) } - defer rs.Body.Close() - body, err := io.ReadAll(rs.Body) - if err != nil { - t.Fatal(err) - } - bytes.TrimSpace(body) - return rs.StatusCode, string(body) -} + req.Header.Set("Content-Type", "application/json") -func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, string) { - rs, err := ts.Client().Post(ts.URL+urlPath, "application/json", data) + rs, err := ts.Client().Do(req) if err != nil { t.Fatal(err) } @@ -65,6 +54,12 @@ func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, s bytes.TrimSpace(body) return rs.StatusCode, string(body) } +func (ts *testServer) get(t *testing.T, urlPath string) (int, string) { + return ts.request(t, http.MethodGet, urlPath, nil) +} +func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, string) { + return ts.request(t, http.MethodPost, urlPath, data) +} func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.ApiErrorResponse { resp := api_types.ApiErrorResponse{} @@ -115,7 +110,7 @@ func setup() { log.Fatal(err, "error connection to test db", 0) } - if err := goose.Up(tempDb.DB, "../../../pkg/commons/db/migrations/postgres"); err != nil { + if err := goose.Up(tempDb.DB, "../../pkg/commons/db/migrations/postgres"); err != nil { log.Fatal(err, "error running migrations", 0) } @@ -189,44 +184,64 @@ func TestInternalGetLatestStateHandler(t *testing.T) { } func TestInternalLoginHandler(t *testing.T) { - // login with email in wrong format - code, body := ts.post(t, "/api/i/login", 
bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) - assert.Equal(t, http.StatusBadRequest, code) - resp := ts.parseErrorResonse(t, body) - assert.Equal(t, "email: given value 'admin' has incorrect format", resp.Error, "unexpected error message") - - // login with correct user and wrong password - code, body = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "wrong"}`)) - assert.Equal(t, http.StatusUnauthorized, code, "login should not be successful") - resp = ts.parseErrorResonse(t, body) - assert.Equal(t, "unauthorized: invalid email or password", resp.Error, "unexpected error message") - - // login with correct user and password - code, _ = ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) - assert.Equal(t, http.StatusOK, code, "login should be successful") - - // check if user is logged in and has a valid session - code, body = ts.get(t, "/api/i/users/me") - assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") - - meResponse := &api_types.InternalGetUserInfoResponse{} - err := json.Unmarshal([]byte(body), meResponse) - assert.Nil(t, err, "error unmarshalling response") - // check if email is censored - assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") - - // check if logout works - code, _ = ts.post(t, "/api/i/logout", bytes.NewBufferString(``)) - assert.Equal(t, http.StatusOK, code, "logout should be successful") - - // check if user is logged out - code, _ = ts.get(t, "/api/i/users/me") - assert.Equal(t, http.StatusUnauthorized, code, "call to users/me should be unauthorized") + t.Run("login with email in wrong format", func(t *testing.T) { + code, body := ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) + assert.Equal(t, http.StatusBadRequest, code) + resp := ts.parseErrorResonse(t, body) + assert.Equal(t, "email: given value 'admin' 
has incorrect format", resp.Error, "unexpected error message") + }) + t.Run("login with correct user and wrong password", func(t *testing.T) { + code, body := ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "wrong"}`)) + assert.Equal(t, http.StatusUnauthorized, code, "login should not be successful") + resp := ts.parseErrorResonse(t, body) + assert.Equal(t, "unauthorized: invalid email or password", resp.Error, "unexpected error message") + }) + + t.Run("login with correct user and password", func(t *testing.T) { + code, _ := ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) + assert.Equal(t, http.StatusOK, code, "login should be successful") + }) + + t.Run("check if user is logged in and has a valid session", func(t *testing.T) { + code, body := ts.get(t, "/api/i/users/me") + assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") + + meResponse := &api_types.InternalGetUserInfoResponse{} + err := json.Unmarshal([]byte(body), meResponse) + assert.Nil(t, err, "error unmarshalling response") + // check if email is censored + assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") + }) + + t.Run("check if logout works", func(t *testing.T) { + code, _ := ts.post(t, "/api/i/logout", bytes.NewBufferString(``)) + assert.Equal(t, http.StatusOK, code, "logout should be successful") + }) + t.Run("// check if user is logged out", func(t *testing.T) { + code, _ := ts.get(t, "/api/i/users/me") + assert.Equal(t, http.StatusUnauthorized, code, "call to users/me should be unauthorized") + }) } func TestInternalSearchHandler(t *testing.T) { // search for validator with index 5 - code, body := ts.post(t, "/api/i/search", 
bytes.NewBufferString(`{"input":"5","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + code, body := ts.post(t, "/api/i/search", bytes.NewBufferString(` + { + "input":"5", + "networks":[ + 17000 + ], + "types":[ + "validators_by_deposit_ens_name", + "validators_by_deposit_address", + "validators_by_withdrawal_ens_name", + "validators_by_withdrawal_address", + "validators_by_withdrawal_credential", + "validator_by_index", + "validator_by_public_key", + "validators_by_graffiti" + ] + }`)) assert.Equal(t, http.StatusOK, code) resp := api_types.InternalPostSearchResponse{} @@ -237,7 +252,23 @@ func TestInternalSearchHandler(t *testing.T) { assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") // search for validator by pubkey - code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(` + { + "input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0", + "networks":[ + 17000 + ], + "types":[ + "validators_by_deposit_ens_name", + "validators_by_deposit_address", + "validators_by_withdrawal_ens_name", + "validators_by_withdrawal_address", + "validators_by_withdrawal_credential", + "validator_by_index", + "validator_by_public_key", + "validators_by_graffiti" + ] + }`)) assert.Equal(t, http.StatusOK, code) resp = 
api_types.InternalPostSearchResponse{} @@ -248,7 +279,23 @@ func TestInternalSearchHandler(t *testing.T) { assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") // search for validator by withdawal address - code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(`{"input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444","networks":[17000],"types":["validators_by_deposit_ens_name","validators_by_deposit_address","validators_by_withdrawal_ens_name","validators_by_withdrawal_address","validators_by_withdrawal_credential","validator_by_index","validator_by_public_key","validators_by_graffiti"]}`)) + code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(` + { + "input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444", + "networks":[ + 17000 + ], + "types":[ + "validators_by_deposit_ens_name", + "validators_by_deposit_address", + "validators_by_withdrawal_ens_name", + "validators_by_withdrawal_address", + "validators_by_withdrawal_credential", + "validator_by_index", + "validator_by_public_key", + "validators_by_graffiti" + ] + }`)) assert.Equal(t, http.StatusOK, code) resp = api_types.InternalPostSearchResponse{} From 73c2653048e16095808b96c6ae4eab99dc44bed2 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:11:14 +0000 Subject: [PATCH 067/187] chore: Update backend integration test workflow --- .github/workflows/backend-integration-test.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 4b4d68beb..89b393b44 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -6,7 +6,6 @@ on: branches: - main - staging - - BEDS-401/api_integration_tests pull_request: paths: - 'backend/**' @@ -30,7 +29,6 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v4 with: - go-version: '1.23' go-version-file: 'backend/go.mod' 
- name: Test with the Go CLI working-directory: backend From c63a2ee7b5edd22a03bccf07aeef837013826a60 Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Wed, 4 Sep 2024 14:11:25 +0200 Subject: [PATCH 068/187] fix: :lipstick: Add workaround to fix the shifitng-display issue and background color when opening the modal. It is an temporary. --- frontend/assets/css/prime.scss | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/frontend/assets/css/prime.scss b/frontend/assets/css/prime.scss index ef6e8fa2e..1e80ebaa3 100644 --- a/frontend/assets/css/prime.scss +++ b/frontend/assets/css/prime.scss @@ -628,3 +628,17 @@ div.p-accordion { } } } + + +/** + * TODO: remove the .p-overflow-hidden and .p-overlay-mask class when PrimeVue is updated. + * This is quick-fix for shifting display issues. + **/ + .p-overflow-hidden { + overflow: hidden !important; /* Block scroll */ + border-right: solid 5px transparent !important; +} + +.p-overlay-mask { + background: var(--container-background); +} From d127dbe2ff13d01d2c83405b6fcde882f57e4a7a Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:14:13 +0000 Subject: [PATCH 069/187] chore: Update backend integration test workflow --- .github/workflows/backend-integration-test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 89b393b44..e05ef5c93 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -30,6 +30,7 @@ jobs: - uses: actions/setup-go@v4 with: go-version-file: 'backend/go.mod' + cache-dependency-path: 'backend/go.sum' - name: Test with the Go CLI working-directory: backend run: go test ./pkg/api/... 
-config "${{ secrets.CI_CONFIG_PATH }}" From 79632946b2c6388840975aa99da038f4a4c18471 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:16:38 +0000 Subject: [PATCH 070/187] chore: Initialize services and update logging in service.go --- backend/pkg/api/services/service.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/pkg/api/services/service.go b/backend/pkg/api/services/service.go index 872e3391b..8108d9687 100644 --- a/backend/pkg/api/services/service.go +++ b/backend/pkg/api/services/service.go @@ -45,8 +45,10 @@ func (s *Services) InitServices() { go s.startEfficiencyDataService(wg) go s.startEmailSenderService(wg) - wg.Wait() log.Infof("initializing prices...") price.Init(utils.Config.Chain.ClConfig.DepositChainID, utils.Config.Eth1ErigonEndpoint, utils.Config.Frontend.ClCurrency, utils.Config.Frontend.ElCurrency) log.Infof("...prices initialized") + + wg.Wait() + log.Infof("...services initialized") } From bf1d9d2c9c2c163458a1d4f070fb058e2a165b93 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:17:52 +0000 Subject: [PATCH 071/187] chore: Update backend integration test workflow --- .github/workflows/backend-integration-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index e05ef5c93..162085860 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -33,7 +33,7 @@ jobs: cache-dependency-path: 'backend/go.sum' - name: Test with the Go CLI working-directory: backend - run: go test ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" + run: go test -v ./pkg/api/... 
-config "${{ secrets.CI_CONFIG_PATH }}" From 76a0184e24aec4aa46fa708ef65c817f7130449d Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:19:58 +0000 Subject: [PATCH 072/187] chore: Add log statement for test setup completion --- backend/pkg/api/api_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index af4619491..baca02684 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -71,6 +71,7 @@ func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.Api func TestMain(m *testing.M) { setup() + log.Info("test stup completed") code := m.Run() teardown() From 0a71110eda95f09f85ec33c80b5845aea251c1fa Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:21:28 +0000 Subject: [PATCH 073/187] chore: Update test setup completion log statement in api_test.go --- backend/pkg/api/api_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index baca02684..dc0df1b87 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -71,7 +71,7 @@ func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.Api func TestMain(m *testing.M) { setup() - log.Info("test stup completed") + log.Info("test setup completed") code := m.Run() teardown() @@ -149,7 +149,9 @@ func setup() { log.InfoWithFields(log.Fields{"config": *configPath, "version": version.Version, "commit": version.GitCommit, "chainName": utils.Config.Chain.ClConfig.ConfigName}, "starting") + log.Info("initializing data access service") dataAccessor = dataaccess.NewDataAccessService(cfg) + log.Info("initializing api router") router := api.NewApiRouter(dataAccessor, cfg) ts = &testServer{httptest.NewTLSServer(router)} From f574b4011a2a986d7bfbdc19900201af8ea1a22d Mon Sep 17 00:00:00 2001 From: peter 
<1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:22:48 +0000 Subject: [PATCH 074/187] chore: Initialize services and update logging in service.go --- backend/pkg/api/data_access/data_access.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/backend/pkg/api/data_access/data_access.go b/backend/pkg/api/data_access/data_access.go index aa8adead6..d3a5d8ad1 100644 --- a/backend/pkg/api/data_access/data_access.go +++ b/backend/pkg/api/data_access/data_access.go @@ -80,9 +80,11 @@ func NewDataAccessService(cfg *types.Config) *DataAccessService { db.BigtableClient = das.bigtable db.PersistentRedisDbClient = das.persistentRedisDbClient + log.Info("DataAccessService initialized") // Create the services das.services = services.NewServices(das.readerDb, das.writerDb, das.alloyReader, das.alloyWriter, das.clickhouseReader, das.bigtable, das.persistentRedisDbClient) + log.Info("Services created") // Initialize the services das.services.InitServices() From 6ae9373e528ff20fcce670652856209362dee583 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:24:21 +0000 Subject: [PATCH 075/187] chore: Update logging in service.go and initialize services --- backend/pkg/api/data_access/data_access.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/api/data_access/data_access.go b/backend/pkg/api/data_access/data_access.go index d3a5d8ad1..1e1c17ab9 100644 --- a/backend/pkg/api/data_access/data_access.go +++ b/backend/pkg/api/data_access/data_access.go @@ -84,7 +84,7 @@ func NewDataAccessService(cfg *types.Config) *DataAccessService { // Create the services das.services = services.NewServices(das.readerDb, das.writerDb, das.alloyReader, das.alloyWriter, das.clickhouseReader, das.bigtable, das.persistentRedisDbClient) - log.Info("Services created") + log.Info("Services created_") // Initialize the services das.services.InitServices() From 2254143e9d093882e938ccccf630544f0397c0a7 
Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:29:58 +0000 Subject: [PATCH 076/187] chore: Start data access services in api_test.go setup() --- backend/pkg/api/api_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index dc0df1b87..8a9d2c5e0 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -151,6 +151,8 @@ func setup() { log.Info("initializing data access service") dataAccessor = dataaccess.NewDataAccessService(cfg) + dataAccessor.StartDataAccessServices() + log.Info("initializing api router") router := api.NewApiRouter(dataAccessor, cfg) From fbd9716c9d6fc1cc55c467f43586d793e0bc4f7a Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 12:41:38 +0000 Subject: [PATCH 077/187] chore: Refactor backend integration test workflow and update test setup in api_test.go --- .github/workflows/backend-integration-test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 162085860..e05ef5c93 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -33,7 +33,7 @@ jobs: cache-dependency-path: 'backend/go.sum' - name: Test with the Go CLI working-directory: backend - run: go test -v ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" + run: go test ./pkg/api/... 
-config "${{ secrets.CI_CONFIG_PATH }}" From 9c48bd0e803918cb54948af4a0896515837cdce8 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Wed, 4 Sep 2024 13:01:38 +0000 Subject: [PATCH 078/187] fix(api): correct order in assert --- backend/pkg/api/api_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index 8a9d2c5e0..03febe7bd 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -334,7 +334,7 @@ func TestSlotVizHandler(t *testing.T) { } } - assert.Equal(t, attestationAssignments, 1, "epoch should have exactly one attestation assignment") + assert.Equal(t, 1, attestationAssignments, "epoch should have exactly one attestation assignment") } assert.Equal(t, 1, headStateCount, "one of the last 4 epochs should be in head state") } From a23489789263af3faa2a3e640fe11943d4c913c3 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Wed, 4 Sep 2024 15:59:57 +0200 Subject: [PATCH 079/187] fix frontend pipeline --- .../stores/dashboard/useUserDashboardStore.ts | 1 + frontend/types/api/archiver.ts | 16 ++++++++++++++++ frontend/types/api/validator_dashboard.ts | 2 +- 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 frontend/types/api/archiver.ts diff --git a/frontend/stores/dashboard/useUserDashboardStore.ts b/frontend/stores/dashboard/useUserDashboardStore.ts index a8cd42ddf..0d79f1d4d 100644 --- a/frontend/stores/dashboard/useUserDashboardStore.ts +++ b/frontend/stores/dashboard/useUserDashboardStore.ts @@ -121,6 +121,7 @@ export function useUserDashboardStore() { id: res.data.id, is_archived: false, name: res.data.name, + network: res.data.network, validator_count: 0, }, ], diff --git a/frontend/types/api/archiver.ts b/frontend/types/api/archiver.ts new file mode 100644 index 000000000..a0ecfe481 --- /dev/null +++ b/frontend/types/api/archiver.ts @@ -0,0 +1,16 @@ +// Code 
generated by tygo. DO NOT EDIT. +/* eslint-disable */ + +////////// +// source: archiver.go + +export interface ArchiverDashboard { + DashboardId: number /* uint64 */; + IsArchived: boolean; + GroupCount: number /* uint64 */; + ValidatorCount: number /* uint64 */; +} +export interface ArchiverDashboardArchiveReason { + DashboardId: number /* uint64 */; + ArchivedReason: any /* enums.VDBArchivedReason */; +} diff --git a/frontend/types/api/validator_dashboard.ts b/frontend/types/api/validator_dashboard.ts index fcf8ee6a4..10fb9d3e1 100644 --- a/frontend/types/api/validator_dashboard.ts +++ b/frontend/types/api/validator_dashboard.ts @@ -28,7 +28,7 @@ export interface VDBOverviewBalances { } export interface VDBOverviewData { name?: string; - network: string; + network: number /* uint64 */; groups: VDBOverviewGroup[]; validators: VDBOverviewValidators; efficiency: PeriodicValues; From b103de4988064fc5f74267e93b1bb04caab7cb71 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Thu, 5 Sep 2024 09:07:58 +0000 Subject: [PATCH 080/187] feat(api): add more integration tests --- .../workflows/backend-integration-test.yml | 2 +- backend/go.mod | 16 + backend/go.sum | 50 +++ backend/pkg/api/api_test.go | 381 ++++++++++++------ 4 files changed, 323 insertions(+), 126 deletions(-) diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index e05ef5c93..48243480e 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -33,7 +33,7 @@ jobs: cache-dependency-path: 'backend/go.sum' - name: Test with the Go CLI working-directory: backend - run: go test ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" + run: go test -failfast ./pkg/api/... 
-config "${{ secrets.CI_CONFIG_PATH }}" diff --git a/backend/go.mod b/backend/go.mod index fe53e1c8e..b53006df9 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -22,6 +22,7 @@ require ( github.com/doug-martin/goqu/v9 v9.19.0 github.com/ethereum/go-ethereum v1.13.12 github.com/fergusstrange/embedded-postgres v1.29.0 + github.com/gavv/httpexpect/v2 v2.16.0 github.com/go-faker/faker/v4 v4.3.0 github.com/go-redis/redis/v8 v8.11.5 github.com/gobitfly/eth-rewards v0.1.2-0.20230403064929-411ddc40a5f7 @@ -89,6 +90,8 @@ require ( cloud.google.com/go/storage v1.36.0 // indirect github.com/ClickHouse/ch-go v0.58.2 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 // indirect + github.com/ajg/form v1.5.1 // indirect github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/alessio/shellescape v1.4.1 // indirect github.com/andybalholm/brotli v1.0.6 // indirect @@ -124,6 +127,7 @@ require ( github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/ethereum/c-kzg-4844 v0.4.0 // indirect github.com/fatih/color v1.16.0 // indirect + github.com/fatih/structs v1.1.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ferranbt/fastssz v0.1.3 // indirect @@ -135,12 +139,14 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/goccy/go-yaml v1.9.5 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect 
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect @@ -149,6 +155,7 @@ require ( github.com/herumi/bls-eth-go-binary v1.31.0 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/huandu/go-clone v1.6.0 // indirect + github.com/imkira/go-interpol v1.1.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect github.com/ipfs/boxo v0.8.0 // indirect github.com/ipfs/go-bitfield v1.1.0 // indirect @@ -181,6 +188,7 @@ require ( github.com/minio/highwayhash v1.0.2 // indirect github.com/minio/sha256-simd v1.0.1 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -207,7 +215,9 @@ require ( github.com/prysmaticlabs/prysm/v3 v3.2.2 // indirect github.com/r3labs/sse/v2 v2.10.0 // indirect github.com/rs/zerolog v1.29.1 // indirect + github.com/sanity-io/litter v1.5.5 // indirect github.com/segmentio/asm v1.2.0 // indirect + github.com/sergi/go-diff v1.2.0 // indirect github.com/sethvargo/go-retry v0.2.4 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect @@ -215,6 +225,8 @@ require ( github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e // indirect github.com/tklauser/go-sysconf v0.3.13 // indirect github.com/tklauser/numcpus v0.7.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.34.0 // indirect github.com/wealdtech/go-bytesutil v1.2.0 // indirect github.com/wealdtech/go-merkletree v1.0.1-0.20190605192610-2bb163c2ea2a // indirect github.com/wealdtech/go-multicodec v1.4.0 // indirect @@ -224,6 +236,9 @@ require ( github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect 
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect + github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect + github.com/yudai/gojsondiff v1.0.0 // indirect + github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect @@ -247,6 +262,7 @@ require ( gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect + moul.io/http2curl/v2 v2.3.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index f3e0980f2..7d41b5693 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -41,8 +41,12 @@ github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migc github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 h1:ZBbLwSJqkHBuFDA6DUhhse0IGJ7T5bemHyNILUjvOq4= +github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2/go.mod h1:VSw57q4QFiWDbRnjdX8Cb3Ow0SFncRw+bA/ofY6Q83w= github.com/VictoriaMetrics/fastcache v1.12.1 h1:i0mICQuojGDL3KblA7wUNlY5lOK6a4bwt3uRKnkZU40= github.com/VictoriaMetrics/fastcache v1.12.1/go.mod h1:tX04vaqcNoQeGLD+ra5pU5sWkuxnzWhEzLwhP9w653o= +github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= +github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= 
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= @@ -51,6 +55,7 @@ github.com/alexedwards/scs/redisstore v0.0.0-20240316134038-7e11d57e8885 h1:UdHe github.com/alexedwards/scs/redisstore v0.0.0-20240316134038-7e11d57e8885/go.mod h1:ceKFatoD+hfHWWeHOAYue1J+XgOJjE7dw8l3JtIRTGY= github.com/alexedwards/scs/v2 v2.8.0 h1:h31yUYoycPuL0zt14c0gd+oqxfRwIj6SOjHdKRZxhEw= github.com/alexedwards/scs/v2 v2.8.0/go.mod h1:ToaROZxyKukJKT/xLcVQAChi5k6+Pn1Gvmdl7h3RRj8= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/andybalholm/brotli v1.0.6 h1:Yf9fFpf49Zrxb9NlQaluyE92/+X7UVHlhMNJN2sxfOI= github.com/andybalholm/brotli v1.0.6/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230512164433-5d1fd1a340c9 h1:goHVqTbFX3AIo0tzGr14pgfAW2ZfPChKO21Z9MGf/gk= @@ -177,6 +182,7 @@ github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -233,6 +239,8 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+ne github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.16.0 
h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -251,6 +259,8 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gavv/httpexpect/v2 v2.16.0 h1:Ty2favARiTYTOkCRZGX7ojXXjGyNAIohM1lZ3vqaEwI= +github.com/gavv/httpexpect/v2 v2.16.0/go.mod h1:uJLaO+hQ25ukBJtQi750PsztObHybNllN+t+MbbW8PY= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays= github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww= github.com/gballet/go-verkle v0.1.1-0.20231031103413-a67434b50f46 h1:BAIP2GihuqhwdILrV+7GJel5lyPV3u1+PgzrWLc0TkE= @@ -299,6 +309,8 @@ github.com/gobitfly/eth.store v0.0.0-20240312111708-b43f13990280 h1:zHl4a19bwoa3 github.com/gobitfly/eth.store v0.0.0-20240312111708-b43f13990280/go.mod h1:1PLeTVRw8Rpmi0o/kRuoJEXOXecZRqSjoAxEMbj+usA= github.com/gobitfly/prysm/v3 v3.0.0-20230216184552-2f3f1e8190d5 h1:8kVoXCPhDwSjaGlKzBVQeE8n49k6jZumBGiP26FHNy0= github.com/gobitfly/prysm/v3 v3.0.0-20230216184552-2f3f1e8190d5/go.mod h1:+v+em7rOykPs93APGWCX/95/3uxU8bSVmbZ4+YNJzdA= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= 
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-yaml v1.9.5 h1:Eh/+3uk9kLxG4koCX6lRMAPS1OaMSAi+FJcya0INdB0= @@ -357,6 +369,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -407,6 +421,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/herumi/bls-eth-go-binary v1.31.0 h1:9eeW3EA4epCb7FIHt2luENpAW69MvKGL5jieHlBiP+w= github.com/herumi/bls-eth-go-binary v1.31.0/go.mod h1:luAnRm3OsMQeokhGzpYmc0ZKwawY7o87PUEP11Z7r7U= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw= github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= 
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= @@ -426,6 +442,8 @@ github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFck github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk= +github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI= github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= @@ -564,6 +582,7 @@ github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhd github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= @@ -657,6 +676,8 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= @@ -747,9 +768,11 @@ github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhM github.com/phpdave11/gofpdi v1.0.7/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= @@ -811,9 +834,14 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 
v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo= +github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sethvargo/go-retry v0.2.4 h1:T+jHEQy/zKJf5s95UkguisicE0zuF9y7+/vgz08Ocec= github.com/sethvargo/go-retry v0.2.4/go.mod h1:1afjQuvh7s4gflMObvjLPaWgluLLyhA1wmVZ6KLpICw= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= @@ -845,6 +873,7 @@ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSS github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -861,6 +890,7 @@ github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbe github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= 
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e h1:cR8/SYRgyQCt5cNCMniB/ZScMkhI9nk8U5C7SbISXjo= github.com/thomaso-mirodin/intmath v0.0.0-20160323211736-5dc6d854e46e/go.mod h1:Tu4lItkATkonrYuvtVjG0/rhy15qrNGNTjPdaphtZ/8= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -880,6 +910,11 @@ github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60Nt github.com/urfave/cli v1.22.12 h1:igJgVw1JdKH+trcLWLeLwZjU9fEfPesQ+9/e4MQ44S8= github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs= github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4= +github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= github.com/vertica/vertica-sql-go v1.3.3 h1:fL+FKEAEy5ONmsvya2WH5T8bhkvY27y/Ik3ReR2T+Qw= github.com/vertica/vertica-sql-go v1.3.3/go.mod h1:jnn2GFuv+O2Jcjktb7zyc4Utlbu9YVqpHH/lx63+1M4= github.com/warpfork/go-testmark v0.11.0 h1:J6LnV8KpceDvo7spaNU4+DauH2n1x+6RaO2rJrmpQ9U= @@ -921,11 +956,19 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= 
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= +github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/ydb-platform/ydb-go-genproto v0.0.0-20240126124512-dbb0e1720dbf h1:ckwNHVo4bv2tqNkgx3W3HANh3ta1j6TR5qw08J1A7Tw= github.com/ydb-platform/ydb-go-genproto v0.0.0-20240126124512-dbb0e1720dbf/go.mod h1:Er+FePu1dNUieD+XTMDduGpQuCPssK5Q4BjF+IIXJ3I= github.com/ydb-platform/ydb-go-sdk/v3 v3.55.1 h1:Ebo6J5AMXgJ3A438ECYotA0aK7ETqjQx9WoZvVxzKBE= github.com/ydb-platform/ydb-go-sdk/v3 v3.55.1/go.mod h1:udNPW8eupyH/EZocecFmaSNJacKKYjzQa7cVgX5U2nc= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -987,6 +1030,7 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto 
v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= @@ -1002,6 +1046,7 @@ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKG golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= @@ -1072,6 +1117,7 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1113,6 +1159,7 @@ golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1168,6 +1215,7 @@ gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1226,6 +1274,8 @@ modernc.org/strutil v1.2.0 
h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= +moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index 03febe7bd..c422bf17f 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -1,19 +1,20 @@ package api_test import ( - "bytes" - "encoding/json" + "crypto/tls" "flag" - "io" + "fmt" "net/http" "net/http/cookiejar" "net/http/httptest" "os" "os/exec" + "sort" "testing" "time" embeddedpostgres "github.com/fergusstrange/embedded-postgres" + "github.com/gavv/httpexpect/v2" "github.com/gobitfly/beaconchain/pkg/api" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" api_types "github.com/gobitfly/beaconchain/pkg/api/types" @@ -27,50 +28,17 @@ import ( "golang.org/x/crypto/bcrypt" ) -var ts *testServer +var ts *httptest.Server var dataAccessor dataaccess.DataAccessor var postgres *embeddedpostgres.EmbeddedPostgres -type testServer struct { - *httptest.Server -} - -func (ts *testServer) request(t *testing.T, method, urlPath string, data io.Reader) (int, string) { - req, err := http.NewRequest(method, ts.URL+urlPath, data) - if err != nil { - t.Fatal(err) - } - req.Header.Set("Content-Type", "application/json") - - rs, err := ts.Client().Do(req) - if err != nil { - t.Fatal(err) - } - defer rs.Body.Close() - body, err := io.ReadAll(rs.Body) +func TestMain(m *testing.M) { + err := setup() if err != nil 
{ - t.Fatal(err) - } - bytes.TrimSpace(body) - return rs.StatusCode, string(body) -} -func (ts *testServer) get(t *testing.T, urlPath string) (int, string) { - return ts.request(t, http.MethodGet, urlPath, nil) -} -func (ts *testServer) post(t *testing.T, urlPath string, data io.Reader) (int, string) { - return ts.request(t, http.MethodPost, urlPath, data) -} - -func (ts *testServer) parseErrorResonse(t *testing.T, body string) api_types.ApiErrorResponse { - resp := api_types.ApiErrorResponse{} - if err := json.Unmarshal([]byte(body), &resp); err != nil { - t.Fatal(err) + log.Error(err, "error setting up test environment", 0) + teardown() + os.Exit(1) } - return resp -} - -func TestMain(m *testing.M) { - setup() log.Info("test setup completed") code := m.Run() teardown() @@ -82,15 +50,21 @@ func TestMain(m *testing.M) { } func teardown() { - dataAccessor.Close() - ts.Close() - err := postgres.Stop() - if err != nil { - log.Error(err, "error stopping embedded postgres", 0) + if dataAccessor != nil { + dataAccessor.Close() + } + if ts != nil { + ts.Close() + } + if postgres != nil { + err := postgres.Stop() + if err != nil { + log.Error(err, "error stopping embedded postgres", 0) + } } } -func setup() { +func setup() error { configPath := flag.String("config", "", "Path to the config file, if empty string defaults will be used") flag.Parse() @@ -102,17 +76,17 @@ func setup() { postgres = embeddedpostgres.NewDatabase(embeddedpostgres.DefaultConfig().Username("postgres")) err := postgres.Start() if err != nil { - log.Fatal(err, "error starting embedded postgres", 0) + return fmt.Errorf("error starting embedded postgres: %w", err) } // connection the the embedded db and run migrations tempDb, err := sqlx.Connect("postgres", "host=localhost port=5432 user=postgres password=postgres dbname=postgres sslmode=disable") if err != nil { - log.Fatal(err, "error connection to test db", 0) + return fmt.Errorf("error connection to test db: %w", err) } if err := goose.Up(tempDb.DB, 
"../../pkg/commons/db/migrations/postgres"); err != nil { - log.Fatal(err, "error running migrations", 0) + return fmt.Errorf("error running migrations: %w", err) } // insert dummy user for testing (email: admin@admin, password: admin) @@ -123,13 +97,24 @@ func setup() { string(pHash), "admin@admin.com", time.Now().Unix(), "admin", true, ) if err != nil { - log.Fatal(err, "error inserting user", 0) + return fmt.Errorf("error inserting user: %w", err) + } + + // required for shared dashboard + pHash, _ = bcrypt.GenerateFromPassword([]byte("admin"), 10) + _, err = tempDb.Exec(` + INSERT INTO users (id, password, email, register_ts, api_key, email_confirmed) + VALUES ($1, $2, $3, TO_TIMESTAMP($4), $5, $6)`, + 122558, string(pHash), "admin2@admin.com", time.Now().Unix(), "admin2", true, + ) + if err != nil { + return fmt.Errorf("error inserting user 2: %w", err) } cfg := &types.Config{} err = utils.ReadConfig(cfg, *configPath) if err != nil { - log.Fatal(err, "error reading config file", 0) + return fmt.Errorf("error reading config file: %w", err) } // hardcode db connection details for testing @@ -156,21 +141,56 @@ func setup() { log.Info("initializing api router") router := api.NewApiRouter(dataAccessor, cfg) - ts = &testServer{httptest.NewTLSServer(router)} + ts = httptest.NewTLSServer(router) jar, err := cookiejar.New(nil) if err != nil { - log.Fatal(err, "error creating cookie jar", 0) + return fmt.Errorf("error creating cookie jar: %w", err) } - ts.Server.Client().Jar = jar + ts.Client().Jar = jar + + return nil +} + +func getExpectConfig(t *testing.T, ts *httptest.Server) httpexpect.Config { + return httpexpect.Config{ + BaseURL: ts.URL, + Reporter: httpexpect.NewAssertReporter(t), + Client: &http.Client{ + Jar: httpexpect.NewCookieJar(), + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + // accept any certificate; for testing only! 
+ //nolint: gosec + InsecureSkipVerify: true, + }, + }, + }, + Printers: []httpexpect.Printer{ + httpexpect.NewCurlPrinter(t), + }, + } +} + +func login(e *httpexpect.Expect) { + e.POST("/api/i/login"). + WithHeader("Content-Type", "application/json"). + WithJSON(map[string]interface{}{"email": "admin@admin.com", "password": "admin"}). + Expect(). + Status(http.StatusOK) +} + +func logout(e *httpexpect.Expect) { + e.POST("/api/i/logout"). + Expect(). + Status(http.StatusOK) } func TestInternalGetProductSummaryHandler(t *testing.T) { - code, body := ts.get(t, "/api/i/product-summary") - assert.Equal(t, http.StatusOK, code) + e := httpexpect.WithConfig(getExpectConfig(t, ts)) respData := api_types.InternalGetProductSummaryResponse{} - err := json.Unmarshal([]byte(body), &respData) - assert.Nil(t, err, "error unmarshalling response") + e.GET("/api/i/product-summary").Expect().Status(http.StatusOK).JSON().Decode(&respData) + assert.NotEqual(t, 0, respData.Data.ValidatorsPerDashboardLimit, "ValidatorsPerDashboardLimit should not be 0") assert.NotEqual(t, 0, len(respData.Data.ApiProducts), "ApiProducts should not be empty") assert.NotEqual(t, 0, len(respData.Data.ExtraDashboardValidatorsPremiumAddon), "ExtraDashboardValidatorsPremiumAddon should not be empty") @@ -178,59 +198,72 @@ func TestInternalGetProductSummaryHandler(t *testing.T) { } func TestInternalGetLatestStateHandler(t *testing.T) { - code, body := ts.get(t, "/api/i/latest-state") - assert.Equal(t, http.StatusOK, code) + e := httpexpect.WithConfig(getExpectConfig(t, ts)) respData := api_types.InternalGetLatestStateResponse{} - err := json.Unmarshal([]byte(body), &respData) - assert.Nil(t, err, "error unmarshalling response") + e.GET("/api/i/latest-state").Expect().Status(http.StatusOK).JSON().Decode(&respData) + assert.NotEqual(t, uint64(0), respData.Data.LatestSlot, "latest slot should not be 0") assert.NotEqual(t, uint64(0), respData.Data.FinalizedEpoch, "finalized epoch should not be 0") } func 
TestInternalLoginHandler(t *testing.T) { + e := httpexpect.WithConfig(getExpectConfig(t, ts)) t.Run("login with email in wrong format", func(t *testing.T) { - code, body := ts.post(t, "/api/i/login", bytes.NewBuffer([]byte(`{"email": "admin", "password": "admin"}`))) - assert.Equal(t, http.StatusBadRequest, code) - resp := ts.parseErrorResonse(t, body) - assert.Equal(t, "email: given value 'admin' has incorrect format", resp.Error, "unexpected error message") + e.POST("/api/i/login"). + WithHeader("Content-Type", "application/json"). + WithJSON(map[string]interface{}{"email": "admin", "password": "admin"}). + Expect(). + Status(http.StatusBadRequest). + JSON(). + Object(). + HasValue("error", "email: given value 'admin' has incorrect format") }) t.Run("login with correct user and wrong password", func(t *testing.T) { - code, body := ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "wrong"}`)) - assert.Equal(t, http.StatusUnauthorized, code, "login should not be successful") - resp := ts.parseErrorResonse(t, body) - assert.Equal(t, "unauthorized: invalid email or password", resp.Error, "unexpected error message") + e.POST("/api/i/login"). + WithHeader("Content-Type", "application/json"). + WithJSON(map[string]interface{}{"email": "admin@admin.com", "password": "wrong"}). + Expect(). + Status(http.StatusUnauthorized). + JSON(). + Object(). 
+ HasValue("error", "unauthorized: invalid email or password") }) t.Run("login with correct user and password", func(t *testing.T) { - code, _ := ts.post(t, "/api/i/login", bytes.NewBufferString(`{"email": "admin@admin.com", "password": "admin"}`)) - assert.Equal(t, http.StatusOK, code, "login should be successful") + login(e) }) t.Run("check if user is logged in and has a valid session", func(t *testing.T) { - code, body := ts.get(t, "/api/i/users/me") - assert.Equal(t, http.StatusOK, code, "call to users/me should be successful") - meResponse := &api_types.InternalGetUserInfoResponse{} - err := json.Unmarshal([]byte(body), meResponse) - assert.Nil(t, err, "error unmarshalling response") + e.GET("/api/i/users/me"). + Expect(). + Status(http.StatusOK). + JSON(). + Decode(&meResponse) + // check if email is censored assert.Equal(t, meResponse.Data.Email, "a***n@a***n.com", "email should be a***n@a***n.com") }) t.Run("check if logout works", func(t *testing.T) { - code, _ := ts.post(t, "/api/i/logout", bytes.NewBufferString(``)) - assert.Equal(t, http.StatusOK, code, "logout should be successful") + logout(e) }) t.Run("// check if user is logged out", func(t *testing.T) { - code, _ := ts.get(t, "/api/i/users/me") - assert.Equal(t, http.StatusUnauthorized, code, "call to users/me should be unauthorized") + e.GET("/api/i/users/me"). + Expect(). + Status(http.StatusUnauthorized) }) } func TestInternalSearchHandler(t *testing.T) { + e := httpexpect.WithConfig(getExpectConfig(t, ts)) + // search for validator with index 5 - code, body := ts.post(t, "/api/i/search", bytes.NewBufferString(` + resp := api_types.InternalPostSearchResponse{} + e.POST("/api/i/search"). + WithHeader("Content-Type", "application/json"). 
+ WithBytes([]byte(` { "input":"5", "networks":[ @@ -246,18 +279,17 @@ func TestInternalSearchHandler(t *testing.T) { "validator_by_public_key", "validators_by_graffiti" ] - }`)) - assert.Equal(t, http.StatusOK, code) + }`)).Expect().Status(http.StatusOK).JSON().Decode(&resp) - resp := api_types.InternalPostSearchResponse{} - err := json.Unmarshal([]byte(body), &resp) - assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") // search for validator by pubkey - code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(` + resp = api_types.InternalPostSearchResponse{} + e.POST("/api/i/search"). + WithHeader("Content-Type", "application/json"). + WithBytes([]byte(` { "input":"0x9699af2bad9826694a480cb523cbe545dc41db955356b3b0d4871f1cf3e4924ae4132fa8c374a0505ae2076d3d65b3e0", "networks":[ @@ -273,19 +305,17 @@ func TestInternalSearchHandler(t *testing.T) { "validator_by_public_key", "validators_by_graffiti" ] - }`)) - assert.Equal(t, http.StatusOK, code) + }`)).Expect().Status(http.StatusOK).JSON().Decode(&resp) - resp = api_types.InternalPostSearchResponse{} - err = json.Unmarshal([]byte(body), &resp) - assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Equal(t, uint64(5), *resp.Data[0].NumValue, "validator index should be 5") // search for validator by withdawal address - code, body = ts.post(t, "/api/i/search", bytes.NewBufferString(` - { + resp = api_types.InternalPostSearchResponse{} + e.POST("/api/i/search"). + WithHeader("Content-Type", "application/json"). 
+ WithBytes([]byte(`{ "input":"0x0e5dda855eb1de2a212cd1f62b2a3ee49d20c444", "networks":[ 17000 @@ -300,41 +330,142 @@ func TestInternalSearchHandler(t *testing.T) { "validator_by_public_key", "validators_by_graffiti" ] - }`)) - assert.Equal(t, http.StatusOK, code) + }`)).Expect().Status(http.StatusOK).JSON().Decode(&resp) - resp = api_types.InternalPostSearchResponse{} - err = json.Unmarshal([]byte(body), &resp) - assert.Nil(t, err, "error unmarshalling response") assert.NotEqual(t, 0, len(resp.Data), "response data should not be empty") assert.NotNil(t, resp.Data[0].NumValue, "validator index should not be nil") assert.Greater(t, *resp.Data[0].NumValue, uint64(0), "returned number of validators should be greater than 0") } -func TestSlotVizHandler(t *testing.T) { - code, body := ts.get(t, "/api/i/validator-dashboards/NQ/slot-viz") - assert.Equal(t, http.StatusOK, code) - - resp := api_types.GetValidatorDashboardSlotVizResponse{} - err := json.Unmarshal([]byte(body), &resp) - assert.Nil(t, err, "error unmarshalling response") - assert.Equal(t, 4, len(resp.Data), "response data should contain the last 4 epochs") - - headStateCount := 0 - for _, epoch := range resp.Data { - if epoch.State == "head" { // count the amount of head epochs returned, should be exactly 1 - headStateCount++ - } - attestationAssignments := 0 - assert.Equal(t, 32, len(epoch.Slots), "each epoch should contain 32 slots") +func TestPublicAndSharedDashboards(t *testing.T) { + t.Parallel() + e := httpexpect.WithConfig(getExpectConfig(t, ts)) + + dashboardIds := []struct { + id string + isShared bool + }{ + {id: "NQ", isShared: false}, + {id: "MSwxNTU2MSwxNTY", isShared: false}, + {id: "v-80d7edaa-74fb-4129-a41e-7700756961cf", isShared: true}, + } - for _, slot := range epoch.Slots { - if slot.Attestations != nil { // count the amount of attestation assignments for each epoch, should be exactly 1 - attestationAssignments++ + for _, dashboardId := range dashboardIds { + t.Run(fmt.Sprintf("[%s]: test 
slot viz", dashboardId.id), func(t *testing.T) { + resp := api_types.GetValidatorDashboardSlotVizResponse{} + e.GET("/api/i/validator-dashboards/{id}/slot-viz", dashboardId.id). + Expect(). + Status(http.StatusOK). + JSON().Decode(&resp) + + assert.Equal(t, 4, len(resp.Data), "response data should contain the last 4 epochs") + + headStateCount := 0 + for _, epoch := range resp.Data { + if epoch.State == "head" { // count the amount of head epochs returned, should be exactly 1 + headStateCount++ + } + attestationAssignments := 0 + assert.Equal(t, 32, len(epoch.Slots), "each epoch should contain 32 slots") + + for _, slot := range epoch.Slots { + if slot.Attestations != nil { // count the amount of attestation assignments for each epoch, should be exactly 1 + attestationAssignments++ + } + } + + assert.GreaterOrEqual(t, attestationAssignments, 1, "epoch should have at least one attestation assignment") } - } - - assert.Equal(t, 1, attestationAssignments, "epoch should have exactly one attestation assignment") + assert.Equal(t, 1, headStateCount, "one of the last 4 epochs should be in head state") + }) + + t.Run(fmt.Sprintf("[%s]: test dashboard overview", dashboardId.id), func(t *testing.T) { + resp := api_types.GetValidatorDashboardResponse{} + e.GET("/api/i/validator-dashboards/{id}", dashboardId.id). + Expect(). + Status(http.StatusOK). + JSON().Decode(&resp) + + numValidators := resp.Data.Validators.Exited + resp.Data.Validators.Offline + resp.Data.Validators.Pending + resp.Data.Validators.Online + resp.Data.Validators.Slashed + assert.Greater(t, numValidators, uint64(0), "dashboard should contain at least one validator") + assert.Greater(t, len(resp.Data.Groups), 0, "dashboard should contain at least one group") + }) + + t.Run(fmt.Sprintf("[%s]: test group summary", dashboardId.id), func(t *testing.T) { + resp := api_types.GetValidatorDashboardSummaryResponse{} + e.GET("/api/i/validator-dashboards/{id}/summary", dashboardId.id). 
+ WithQuery("period", "last_24h"). + WithQuery("limit", "10"). + WithQuery("sort", "efficiency:desc"). + Expect().Status(http.StatusOK).JSON().Decode(&resp) + + assert.Greater(t, len(resp.Data), 0, "dashboard should contain at least one group summary row") + + t.Run(fmt.Sprintf("[%s / %d]: test group details", dashboardId.id, resp.Data[0].GroupId), func(t *testing.T) { + groupResp := api_types.GetValidatorDashboardGroupSummaryResponse{} + e.GET("/api/i/validator-dashboards/{id}/groups/{groupId}/summary", dashboardId.id, resp.Data[0].GroupId). + WithQuery("period", "all_time"). + Expect(). + Status(http.StatusOK). + JSON().Decode(&groupResp) + + assert.Greater(t, groupResp.Data.AttestationsHead.Success+groupResp.Data.AttestationsHead.Failed, uint64(0), "group should have at least head attestation") + assert.Greater(t, groupResp.Data.AttestationsSource.Success+groupResp.Data.AttestationsSource.Failed, uint64(0), "group should have at least source attestation") + assert.Greater(t, groupResp.Data.AttestationsTarget.Success+groupResp.Data.AttestationsTarget.Failed, uint64(0), "group should have at least target attestation") + }) + }) + + t.Run(fmt.Sprintf("[%s]: test group summary chart", dashboardId.id), func(t *testing.T) { + resp := api_types.GetValidatorDashboardSummaryChartResponse{} + e.GET("/api/i/validator-dashboards/{id}/summary-chart", dashboardId.id). + WithQuery("aggregation", "hourly"). + WithQuery("before_ts", time.Now().Unix()). + WithQuery("efficiency_type", "all"). + WithQuery("group_ids", "-1,-2"). 
+ Expect().Status(http.StatusOK).JSON().Decode(&resp) + + assert.Greater(t, len(resp.Data.Categories), 0, "group summary chart categories should not be empty") + assert.Greater(t, len(resp.Data.Series), 0, "group summary chart series should not be empty") + }) + + t.Run(fmt.Sprintf("[%s]: test rewards", dashboardId.id), func(t *testing.T) { + resp := api_types.GetValidatorDashboardRewardsResponse{} + e.GET("/api/i/validator-dashboards/{id}/rewards", dashboardId.id). + WithQuery("limit", 10). + WithQuery("sort", "epoch:desc"). + Expect().Status(http.StatusOK).JSON().Decode(&resp) + + assert.Greater(t, len(resp.Data), 0, "rewards response should not be empty") + assert.LessOrEqual(t, len(resp.Data), 10, "rewards response should not contain more than 10 entries") + assert.True(t, sort.SliceIsSorted(resp.Data, func(i, j int) bool { + return resp.Data[i].Epoch > resp.Data[j].Epoch + }), "rewards should be sorted by epoch in descending order") + + resp = api_types.GetValidatorDashboardRewardsResponse{} + e.GET("/api/i/validator-dashboards/{id}/rewards", dashboardId.id). + WithQuery("limit", 10). + WithQuery("sort", "epoch:asc"). + Expect().Status(http.StatusOK).JSON().Decode(&resp) + assert.Greater(t, len(resp.Data), 0, "rewards response should not be empty") + assert.LessOrEqual(t, len(resp.Data), 10, "rewards response should not contain more than 10 entries") + assert.True(t, sort.SliceIsSorted(resp.Data, func(i, j int) bool { + return resp.Data[i].Epoch < resp.Data[j].Epoch + }), "rewards should be sorted by epoch in ascending order") + + rewardDetails := api_types.GetValidatorDashboardGroupRewardsResponse{} + e.GET("/api/i/validator-dashboards/{id}/groups/{group_id}/rewards/{epoch}", dashboardId.id, resp.Data[0].GroupId, resp.Data[0].Epoch). + WithQuery("limit", 10). + WithQuery("sort", "epoch:asc"). 
+ Expect().Status(http.StatusOK).JSON().Decode(&rewardDetails) + }) + + t.Run(fmt.Sprintf("[%s]: test rewards chart", dashboardId.id), func(t *testing.T) { + resp := api_types.GetValidatorDashboardRewardsChartResponse{} + e.GET("/api/i/validator-dashboards/{id}/rewards-chart", dashboardId.id). + Expect().Status(http.StatusOK).JSON().Decode(&resp) + + assert.Greater(t, len(resp.Data.Categories), 0, "rewards chart categories should not be empty") + assert.Greater(t, len(resp.Data.Series), 0, "rewards chart series should not be empty") + }) } - assert.Equal(t, 1, headStateCount, "one of the last 4 epochs should be in head state") } From 91d5ff0116df9f0d35bc8a4a0b0c0b80cc734677 Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Thu, 5 Sep 2024 12:38:31 +0200 Subject: [PATCH 081/187] feat: Adapt to API changes. BEDS-422 --- frontend/types/customFetch.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/types/customFetch.ts b/frontend/types/customFetch.ts index ee27339ce..8b209c949 100644 --- a/frontend/types/customFetch.ts +++ b/frontend/types/customFetch.ts @@ -116,9 +116,9 @@ export const mapping: Record = { }, [API_PATH.DASHBOARD_DELETE_VALIDATOR]: { getPath: values => `/validator-dashboards/${values?.dashboardKey}`, - method: 'DELETE', + method: 'POST', mock: false, - path: '/validator-dashboards/{dashboardKey}', + path: '/validator-dashboards/{dashboard_id}/validators/bulk-deletions', }, [API_PATH.DASHBOARD_EL_DEPOSITS]: { getPath: values => @@ -221,7 +221,7 @@ export const mapping: Record = { getPath: values => `/validator-dashboards/${values?.dashboardKey}/validators`, mock: false, - path: 'validator-dashboards/{dashboard_id}/validators', + path: 'validator-dashboards/{dashboard_id}/validators/bulk-deletions', }, [API_PATH.DASHBOARD_VALIDATOR_REWARDS]: { getPath: values => From f7e9faab9966f0c4931a1bcb18f3631c2be2c463 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Thu, 5 Sep 2024 
14:24:19 +0200 Subject: [PATCH 082/187] BEDS-306: monitoring: attempt to deflake rolling check --- .../services/clickhouse_rollings.go | 42 +++++++++++-------- 1 file changed, 24 insertions(+), 18 deletions(-) diff --git a/backend/pkg/monitoring/services/clickhouse_rollings.go b/backend/pkg/monitoring/services/clickhouse_rollings.go index 054fb5738..259bf5a44 100644 --- a/backend/pkg/monitoring/services/clickhouse_rollings.go +++ b/backend/pkg/monitoring/services/clickhouse_rollings.go @@ -8,6 +8,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/gobitfly/beaconchain/pkg/monitoring/constants" ) @@ -66,30 +67,35 @@ func (s *ServiceClickhouseRollings) runChecks() { // context with deadline ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second) defer cancel() - var delta uint64 - err := db.ClickHouseReader.GetContext(ctx, &delta, fmt.Sprintf(` + var tsEpochTable time.Time + err := db.ClickHouseReader.GetContext(ctx, &tsEpochTable, ` SELECT - coalesce(( - SELECT - max(epoch) - FROM holesky.validator_dashboard_data_epoch - WHERE - epoch_timestamp = ( - SELECT - max(epoch_timestamp) - FROM holesky.validator_dashboard_data_epoch)) - MAX(epoch_end), 255) AS delta - FROM - holesky.validator_dashboard_data_rolling_%s - WHERE - validator_index = 0`, rolling)) + max(epoch_timestamp) + FROM holesky.validator_dashboard_data_epoch`, + ) if err != nil { r(constants.Failure, map[string]string{"error": err.Error()}) return } + var epochRollingTable uint64 + err = db.ClickHouseReader.GetContext(ctx, &epochRollingTable, fmt.Sprintf(` + SELECT + max(epoch_end) + FROM holesky.validator_dashboard_data_rolling_%s`, + rolling, + ), + ) + if err != nil { + r(constants.Failure, map[string]string{"error": err.Error()}) + return + } + // convert to timestamp + tsRollingTable := utils.EpochToTime(epochRollingTable) + threshold := 30 * time.Minute + delta := 
tsEpochTable.Sub(tsRollingTable) // check if delta is out of bounds - threshold := 4 - md := map[string]string{"delta": fmt.Sprintf("%d", delta), "threshold": fmt.Sprintf("%d", threshold)} - if delta > uint64(threshold) { + md := map[string]string{"delta": delta.String(), "threshold": threshold.String()} + if delta > threshold { md["error"] = fmt.Sprintf("delta is over threshold %d", threshold) r(constants.Failure, md) return From b358d7bfc5a0aa597a0bcb5745bd91838b3542ad Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Thu, 5 Sep 2024 14:24:41 +0200 Subject: [PATCH 083/187] BEDS-99: enable support for enabling more log levels --- backend/pkg/commons/utils/config.go | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/backend/pkg/commons/utils/config.go b/backend/pkg/commons/utils/config.go index 04e813e62..1cc3179eb 100644 --- a/backend/pkg/commons/utils/config.go +++ b/backend/pkg/commons/utils/config.go @@ -478,9 +478,22 @@ func setCLConfig(cfg *types.Config) error { cfg.Chain.ClConfig = *chainConfig } - // Set log level based on environment variable - if strings.ToLower(os.Getenv("LOG_LEVEL")) == "debug" { + // rewrite to match to allow trace as well + switch strings.ToLower(os.Getenv("LOG_LEVEL")) { + case "trace": + logrus.SetLevel(logrus.TraceLevel) + case "debug": logrus.SetLevel(logrus.DebugLevel) + case "info": + logrus.SetLevel(logrus.InfoLevel) + case "warn": + logrus.SetLevel(logrus.WarnLevel) + case "error": + logrus.SetLevel(logrus.ErrorLevel) + case "fatal": + logrus.SetLevel(logrus.FatalLevel) + case "panic": + logrus.SetLevel(logrus.PanicLevel) } return nil From 753c2fec84255ce4041c920d26e1bd919eb01e8b Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Thu, 5 Sep 2024 14:36:17 +0200 Subject: [PATCH 084/187] feat: Adapt frontend. 
BEDS-422 --- frontend/components/dashboard/ValidatorManagementModal.vue | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/ValidatorManagementModal.vue index c7bd58024..1f4aa8308 100644 --- a/frontend/components/dashboard/ValidatorManagementModal.vue +++ b/frontend/components/dashboard/ValidatorManagementModal.vue @@ -154,8 +154,8 @@ const removeValidators = async (validators?: NumberOrString[]) => { await fetch( API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT, { - method: 'DELETE', - query: { validators: validators.join(',') }, + body: JSON.stringify({ validators }), // Move validators into the body + method: 'POST', }, { dashboardKey: dashboardKey.value }, ) From cf179d282be782312c96e852f161b82bcba460af Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Thu, 5 Sep 2024 14:39:12 +0200 Subject: [PATCH 085/187] feat: delete unnecessery comment. BEDS-422 --- frontend/components/dashboard/ValidatorManagementModal.vue | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/ValidatorManagementModal.vue index 1f4aa8308..35e6f0642 100644 --- a/frontend/components/dashboard/ValidatorManagementModal.vue +++ b/frontend/components/dashboard/ValidatorManagementModal.vue @@ -154,7 +154,7 @@ const removeValidators = async (validators?: NumberOrString[]) => { await fetch( API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT, { - body: JSON.stringify({ validators }), // Move validators into the body + body: JSON.stringify({ validators }), method: 'POST', }, { dashboardKey: dashboardKey.value }, From f8a66b1c2b206dfcaab7948e5c88fb7b1e8711fc Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Thu, 5 Sep 2024 15:43:42 +0200 Subject: [PATCH 086/187] feat: Add necessery changes for the api update. 
BEDS-422 --- frontend/components/dashboard/ValidatorManagementModal.vue | 2 +- frontend/types/customFetch.ts | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/ValidatorManagementModal.vue index 35e6f0642..f64946de6 100644 --- a/frontend/components/dashboard/ValidatorManagementModal.vue +++ b/frontend/components/dashboard/ValidatorManagementModal.vue @@ -152,7 +152,7 @@ const removeValidators = async (validators?: NumberOrString[]) => { } await fetch( - API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT, + API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT_DELETE, { body: JSON.stringify({ validators }), method: 'POST', diff --git a/frontend/types/customFetch.ts b/frontend/types/customFetch.ts index 8b209c949..19af0e464 100644 --- a/frontend/types/customFetch.ts +++ b/frontend/types/customFetch.ts @@ -30,6 +30,7 @@ export enum API_PATH { DASHBOARD_VALIDATOR_GROUPS = '/validator-dashboards/groups', DASHBOARD_VALIDATOR_INDICES = '/validator-dashboards/indices', DASHBOARD_VALIDATOR_MANAGEMENT = '/validator-dashboards/validators', + DASHBOARD_VALIDATOR_MANAGEMENT_DELETE = '/validator-dashboards/validators/bulk-deletions', DASHBOARD_VALIDATOR_REWARDS = '/dashboard/validatorRewards', DASHBOARD_VALIDATOR_REWARDS_CHART = '/dashboard/validatorRewardsChart', DASHBOARD_VALIDATOR_REWARDS_DETAILS = '/dashboard/validatorRewardsDetails', @@ -223,6 +224,12 @@ export const mapping: Record = { mock: false, path: 'validator-dashboards/{dashboard_id}/validators/bulk-deletions', }, + [API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT_DELETE]: { + getPath: values => + `/validator-dashboards/${values?.dashboardKey}/validators/bulk-deletions`, + mock: false, + path: 'validator-dashboards/{dashboard_id}/validators/bulk-deletions', + }, [API_PATH.DASHBOARD_VALIDATOR_REWARDS]: { getPath: values => `/validator-dashboards/${values?.dashboardKey}/rewards`, From f2f0a1db379e01ed1f716753c12599e79f34980f Mon Sep 17 
00:00:00 2001 From: benji-bitfly Date: Thu, 5 Sep 2024 16:00:03 +0200 Subject: [PATCH 087/187] feat: Add changes necessery changes. BEDS-422 --- frontend/types/customFetch.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/frontend/types/customFetch.ts b/frontend/types/customFetch.ts index 19af0e464..dcc3ec474 100644 --- a/frontend/types/customFetch.ts +++ b/frontend/types/customFetch.ts @@ -117,9 +117,9 @@ export const mapping: Record = { }, [API_PATH.DASHBOARD_DELETE_VALIDATOR]: { getPath: values => `/validator-dashboards/${values?.dashboardKey}`, - method: 'POST', + method: 'DELETE', mock: false, - path: '/validator-dashboards/{dashboard_id}/validators/bulk-deletions', + path: '/validator-dashboards/{dashboardKey}', }, [API_PATH.DASHBOARD_EL_DEPOSITS]: { getPath: values => @@ -222,7 +222,7 @@ export const mapping: Record = { getPath: values => `/validator-dashboards/${values?.dashboardKey}/validators`, mock: false, - path: 'validator-dashboards/{dashboard_id}/validators/bulk-deletions', + path: 'validator-dashboards/{dashboard_id}/validators', }, [API_PATH.DASHBOARD_VALIDATOR_MANAGEMENT_DELETE]: { getPath: values => From 918be99f6c0c6ca2917a56b030e3c02ff8aea30b Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 5 Sep 2024 17:24:46 +0200 Subject: [PATCH 088/187] ci: tag docker images (#835) BEDS-438 --- .github/workflows/backend-publish-docker.yml | 5 +++++ .github/workflows/frontend-publish-docker.yml | 2 ++ 2 files changed, 7 insertions(+) diff --git a/.github/workflows/backend-publish-docker.yml b/.github/workflows/backend-publish-docker.yml index b3ea36103..1f8032905 100644 --- a/.github/workflows/backend-publish-docker.yml +++ b/.github/workflows/backend-publish-docker.yml @@ -6,6 +6,7 @@ on: push: paths: - 'backend/**' + - '.github/**' branches: - main - staging @@ -27,6 +28,9 @@ jobs: steps: - name: Checkout repository uses: actions/checkout@v4 + - name: Set version + run: | + echo "BEACONCHAIN_VERSION=$(TZ=UTC0 git show --quiet 
--date='format-local:%Y%m%d%H%M%S' --format="%cd" $GITHUB_SHA)-$(git describe $GITHUB_SHA --always --tags))" >> "$GITHUB_ENV" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. @@ -42,6 +46,7 @@ jobs: uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: ${{ env.BEACONCHAIN_VERSION }} # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. diff --git a/.github/workflows/frontend-publish-docker.yml b/.github/workflows/frontend-publish-docker.yml index 6e092c37d..1ee6c4391 100644 --- a/.github/workflows/frontend-publish-docker.yml +++ b/.github/workflows/frontend-publish-docker.yml @@ -6,6 +6,7 @@ on: push: paths: - 'frontend/**' + - '.github/**' branches: - main - staging @@ -45,6 +46,7 @@ jobs: uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} + tags: ${{ env.BEACONCHAIN_VERSION }} # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. # It uses the `context` parameter to define the build's context as the set of files located in the specified path. 
For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. From 910ade94b5b0622eef48a02826b2f8a8757c9a37 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Fri, 6 Sep 2024 10:25:28 +0200 Subject: [PATCH 089/187] (BEDS-442) accept charset param in request content type (#837) --- backend/pkg/api/handlers/common.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index 54f25b5c9..ecc9794db 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -67,6 +67,7 @@ var ( reEmail = regexp.MustCompile("^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*$") rePassword = regexp.MustCompile(`^.{5,}$`) reEmailUserToken = regexp.MustCompile(`^[a-z0-9]{40}$`) + reJsonContentType = regexp.MustCompile(`^application\/json(;.*)?$`) ) const ( @@ -184,7 +185,7 @@ func (v *validationError) checkUserEmailToken(token string) string { // return error only if internal error occurs, otherwise add error to validationError and/or return nil func (v *validationError) checkBody(data interface{}, r *http.Request) error { // check if content type is application/json - if contentType := r.Header.Get("Content-Type"); contentType != "application/json" { + if contentType := r.Header.Get("Content-Type"); !reJsonContentType.MatchString(contentType) { v.add("request body", "'Content-Type' header must be 'application/json'") } From e61defb55fcda999feef8655520911a2c4474759 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 6 Sep 2024 13:46:17 +0200 Subject: [PATCH 090/187] ci: fix docker-tags (remove dash at end) (#839) BEDS-438 --- 
.github/workflows/backend-publish-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/backend-publish-docker.yml b/.github/workflows/backend-publish-docker.yml index 1f8032905..393585908 100644 --- a/.github/workflows/backend-publish-docker.yml +++ b/.github/workflows/backend-publish-docker.yml @@ -30,7 +30,7 @@ jobs: uses: actions/checkout@v4 - name: Set version run: | - echo "BEACONCHAIN_VERSION=$(TZ=UTC0 git show --quiet --date='format-local:%Y%m%d%H%M%S' --format="%cd" $GITHUB_SHA)-$(git describe $GITHUB_SHA --always --tags))" >> "$GITHUB_ENV" + echo "BEACONCHAIN_VERSION=$(TZ=UTC0 git show --quiet --date='format-local:%Y%m%d%H%M%S' --format="%cd" $GITHUB_SHA)-$(git describe $GITHUB_SHA --always --tags)" >> "$GITHUB_ENV" - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 
From c8b6d383ab87b20eafeee1908b6dc358b3549c43 Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 9 Sep 2024 09:08:05 +0200 Subject: [PATCH 091/187] Beds 90/fix ens long names (#840) * ci: fix docker-tags (remove dash at end) BEDS-438 * ens: fix handling reg of long names BEDS-90 --- backend/pkg/commons/db/ens.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/backend/pkg/commons/db/ens.go b/backend/pkg/commons/db/ens.go index f1cd54e64..46001df56 100644 --- a/backend/pkg/commons/db/ens.go +++ b/backend/pkg/commons/db/ens.go @@ -174,6 +174,11 @@ func (bigtable *Bigtable) TransformEnsNameRegistered(blk *types.Eth1Block, cache log.WarnWithFields(logFields, "error unpacking ens-log") continue } + if err = verifyName(r.Name); err != nil { + logFields["error"] = err + log.WarnWithFields(logFields, "error verifying ens-name") + continue + } keys[fmt.Sprintf("%s:ENS:V:N:%s", bigtable.chainId, r.Name)] = true keys[fmt.Sprintf("%s:ENS:V:A:%x", bigtable.chainId, r.Owner)] = true } else if bytes.Equal(lTopic, ensContracts.ENSETHRegistrarControllerParsedABI.Events["NameRenewed"].ID.Bytes()) { @@ -185,6 +190,11 @@ func (bigtable *Bigtable) TransformEnsNameRegistered(blk *types.Eth1Block, cache log.WarnWithFields(logFields, "error unpacking ens-log") continue } + if err = verifyName(r.Name); err != nil { + logFields["error"] = err + log.WarnWithFields(logFields, "error verifying ens-name") + continue + } keys[fmt.Sprintf("%s:ENS:V:N:%s", bigtable.chainId, r.Name)] = true } } else if ensContract == "OldEnsRegistrarController" { From a09285d353b41cdc6c1cfd276c61a3a785d85e74 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 08:07:25 +0000 Subject: [PATCH 092/187] fix(dashboard): source head epoch from slot viz duties response --- backend/pkg/api/data_access/vdb_slotviz.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_slotviz.go 
b/backend/pkg/api/data_access/vdb_slotviz.go index 98d8c60d9..67c494a79 100644 --- a/backend/pkg/api/data_access/vdb_slotviz.go +++ b/backend/pkg/api/data_access/vdb_slotviz.go @@ -4,7 +4,6 @@ import ( "context" t "github.com/gobitfly/beaconchain/pkg/api/types" - "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/utils" ) @@ -17,8 +16,16 @@ func (d *DataAccessService) GetValidatorDashboardSlotViz(ctx context.Context, da validatorsMap := utils.SliceToMap(validatorsArray) + maxValidatorsInResponse := 6 + + dutiesInfo, err := d.services.GetCurrentDutiesInfo() + if err != nil { + return nil, err + } + // Get min/max slot/epoch - headEpoch := cache.LatestEpoch.Get() // Reminder: Currently it is possible to get the head epoch from the cache but nothing sets it in v2 + headEpoch := utils.EpochOfSlot(dutiesInfo.LatestSlot) + slotsPerEpoch := utils.Config.Chain.ClConfig.SlotsPerEpoch minEpoch := uint64(0) @@ -27,13 +34,6 @@ func (d *DataAccessService) GetValidatorDashboardSlotViz(ctx context.Context, da } maxEpoch := headEpoch + 1 - maxValidatorsInResponse := 6 - - dutiesInfo, err := d.services.GetCurrentDutiesInfo() - if err != nil { - return nil, err - } - epochToIndexMap := make(map[uint64]uint64) slotToIndexMap := make(map[uint64]uint64) From bc57bfd680e69bd97c75a239b659f35ca510f80b Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:54:53 +0000 Subject: [PATCH 093/187] simplify extraction of user ids from the notification map --- backend/pkg/notification/notifications.go | 63 +++++++++++------------ 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index e68f5f3f6..71ec9e0eb 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -8,6 +8,8 @@ import ( "encoding/hex" "encoding/json" "errors" + "maps" + "slices" "fmt" 
"html/template" @@ -283,28 +285,28 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { return nil, fmt.Errorf("error getting dashboard definitions: %v", err) } - // Now initialize the validator dashboard configuration map - validatorDashboardConfig := &types.ValidatorDashboardConfig{ - DashboardsByUserId: make(map[types.UserId]map[types.DashboardId]*types.ValidatorDashboard), - } - for _, row := range dashboardDefinitions { - if validatorDashboardConfig.DashboardsByUserId[row.UserId] == nil { - validatorDashboardConfig.DashboardsByUserId[row.UserId] = make(map[types.DashboardId]*types.ValidatorDashboard) - } - if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] == nil { - validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] = &types.ValidatorDashboard{ - Name: row.DashboardName, - Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), - } - } - if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] == nil { - validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ - Name: row.GroupName, - Validators: []uint64{}, - } - } - validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) - } + // // Now initialize the validator dashboard configuration map + // validatorDashboardConfig := &types.ValidatorDashboardConfig{ + // DashboardsByUserId: make(map[types.UserId]map[types.DashboardId]*types.ValidatorDashboard), + // } + // for _, row := range dashboardDefinitions { + // if validatorDashboardConfig.DashboardsByUserId[row.UserId] == nil { + // validatorDashboardConfig.DashboardsByUserId[row.UserId] = make(map[types.DashboardId]*types.ValidatorDashboard) + // } + // if 
validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] == nil { + // validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId] = &types.ValidatorDashboard{ + // Name: row.DashboardName, + // Groups: make(map[types.DashboardGroupId]*types.ValidatorDashboardGroup), + // } + // } + // if validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] == nil { + // validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId] = &types.ValidatorDashboardGroup{ + // Name: row.GroupName, + // Validators: []uint64{}, + // } + // } + // validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators = append(validatorDashboardConfig.DashboardsByUserId[row.UserId][row.DashboardId].Groups[row.GroupId].Validators, uint64(row.ValidatorIndex)) + // } // TODO: pass the validatorDashboardConfig to the notification collection functions // The following functions will collect the notifications and add them to the @@ -526,7 +528,7 @@ func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useD for state, subs := range stateToSub { subArray := make([]int64, 0) for subID := range subs { - subArray = append(subArray, int64(subID)) + subArray = append(subArray, int64(subID)) //nolint:gosec } _, err := db.FrontendWriterDB.Exec(`UPDATE users_subscriptions SET internal_state = $1 WHERE id = ANY($2)`, state, pq.Int64Array(subArray)) if err != nil { @@ -582,10 +584,7 @@ func getNetwork() string { } func queuePushNotification(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { - userIDs := []types.UserId{} - for userID := range notificationsByUserID { - userIDs = append(userIDs, userID) - } + userIDs := slices.Collect(maps.Keys(notificationsByUserID)) tokensByUserID, err := GetUserPushTokenByIds(userIDs) if err != nil { @@ -690,10 +689,8 @@ func sendPushNotifications(useDB *sqlx.DB) error { } func 
queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { - userIDs := []types.UserId{} - for userID := range notificationsByUserID { - userIDs = append(userIDs, userID) - } + userIDs := slices.Collect(maps.Keys(notificationsByUserID)) + emailsByUserID, err := GetUserEmailsByIds(userIDs) if err != nil { metrics.Errors.WithLabelValues("notifications_get_user_mail_by_id").Inc() @@ -1009,7 +1006,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { go func(n types.TransitWebhook) { if n.Content.Webhook.Retries > 0 { - time.Sleep(time.Duration(n.Content.Webhook.Retries) * time.Second) + time.Sleep(time.Duration(n.Content.Webhook.Retries) * time.Second) //nolint:gosec } resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) if err != nil { @@ -1127,7 +1124,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { break // stop } // sleep between retries - time.Sleep(time.Duration(webhook.Retries) * time.Second) + time.Sleep(time.Duration(webhook.Retries) * time.Second) //nolint:gosec reqBody := new(bytes.Buffer) err := json.NewEncoder(reqBody).Encode(reqs[i].Content.DiscordRequest) From 3a148a7b108216aa7abf63aee42c5f84e62821bc Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:58:32 +0000 Subject: [PATCH 094/187] chore(notifications): please linter --- backend/pkg/notification/notifications.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 71ec9e0eb..4b5512f86 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -528,7 +528,7 @@ func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useD for state, subs := range stateToSub { subArray := make([]int64, 0) for subID := range subs { - subArray = append(subArray, int64(subID)) //nolint:gosec + subArray = 
append(subArray, int64(subID)) } _, err := db.FrontendWriterDB.Exec(`UPDATE users_subscriptions SET internal_state = $1 WHERE id = ANY($2)`, state, pq.Int64Array(subArray)) if err != nil { @@ -1006,7 +1006,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { go func(n types.TransitWebhook) { if n.Content.Webhook.Retries > 0 { - time.Sleep(time.Duration(n.Content.Webhook.Retries) * time.Second) //nolint:gosec + time.Sleep(time.Duration(n.Content.Webhook.Retries) * time.Second) } resp, err := client.Post(n.Content.Webhook.Url, "application/json", reqBody) if err != nil { @@ -1124,7 +1124,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { break // stop } // sleep between retries - time.Sleep(time.Duration(webhook.Retries) * time.Second) //nolint:gosec + time.Sleep(time.Duration(webhook.Retries) * time.Second) reqBody := new(bytes.Buffer) err := json.NewEncoder(reqBody).Encode(reqs[i].Content.DiscordRequest) From f8265c5851a59abf5a0ff668c08e522e4b4fcfd1 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:59:24 +0000 Subject: [PATCH 095/187] chore(notifications): disable loading dashboard configurations --- backend/pkg/notification/notifications.go | 42 +++++++++++------------ 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 4b5512f86..59399f3b7 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -263,27 +263,27 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { ValidatorIndex types.ValidatorIndex `db:"validator_index"` } - log.Infof("retrieving dashboard definitions") - // Retrieve all dashboard definitions to be able to retrieve validators included in - // the group notification subscriptions - // TODO: add a filter to retrieve only groups that have notifications enabled - // Needs a new field in the db - var 
dashboardDefinitions []dashboardDefinitionRow - err = db.AlloyWriter.Select(&dashboardDefinitions, ` - select - users_val_dashboards.id as dashboard_id, - users_val_dashboards.name as dashboard_name, - users_val_dashboards.user_id, - users_val_dashboards_groups.id as group_id, - users_val_dashboards_groups.name as group_name, - users_val_dashboards_validators.validator_index - from users_val_dashboards - left join users_val_dashboards_groups on users_val_dashboards_groups.dashboard_id = users_val_dashboards.id - left join users_val_dashboards_validators on users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id; - `) - if err != nil { - return nil, fmt.Errorf("error getting dashboard definitions: %v", err) - } + // log.Infof("retrieving dashboard definitions") + // // Retrieve all dashboard definitions to be able to retrieve validators included in + // // the group notification subscriptions + // // TODO: add a filter to retrieve only groups that have notifications enabled + // // Needs a new field in the db + // var dashboardDefinitions []dashboardDefinitionRow + // err = db.AlloyWriter.Select(&dashboardDefinitions, ` + // select + // users_val_dashboards.id as dashboard_id, + // users_val_dashboards.name as dashboard_name, + // users_val_dashboards.user_id, + // users_val_dashboards_groups.id as group_id, + // users_val_dashboards_groups.name as group_name, + // users_val_dashboards_validators.validator_index + // from users_val_dashboards + // left join users_val_dashboards_groups on users_val_dashboards_groups.dashboard_id = users_val_dashboards.id + // left join users_val_dashboards_validators on users_val_dashboards_validators.dashboard_id = users_val_dashboards_groups.dashboard_id AND users_val_dashboards_validators.group_id = users_val_dashboards_groups.id; + // `) + // if err != nil { + // return nil, fmt.Errorf("error getting dashboard 
definitions: %v", err) + // } // // Now initialize the validator dashboard configuration map // validatorDashboardConfig := &types.ValidatorDashboardConfig{ From 23ed6e285f69603811e1085f7bfaa7485428f2aa Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 09:59:46 +0000 Subject: [PATCH 096/187] chore(notifications): remove unused code for dashboard configurations --- backend/pkg/notification/notifications.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 59399f3b7..a8a6209eb 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -254,14 +254,14 @@ func collectNotifications(epoch uint64) (types.NotificationsPerUserId, error) { log.Infof("started collecting notifications") - type dashboardDefinitionRow struct { - DashboardId types.DashboardId `db:"dashboard_id"` - DashboardName string `db:"dashboard_name"` - UserId types.UserId `db:"user_id"` - GroupId types.DashboardGroupId `db:"group_id"` - GroupName string `db:"group_name"` - ValidatorIndex types.ValidatorIndex `db:"validator_index"` - } + // type dashboardDefinitionRow struct { + // DashboardId types.DashboardId `db:"dashboard_id"` + // DashboardName string `db:"dashboard_name"` + // UserId types.UserId `db:"user_id"` + // GroupId types.DashboardGroupId `db:"group_id"` + // GroupName string `db:"group_name"` + // ValidatorIndex types.ValidatorIndex `db:"validator_index"` + // } // log.Infof("retrieving dashboard definitions") // // Retrieve all dashboard definitions to be able to retrieve validators included in From 601c74069d621a736995642217e676d64ddfd97f Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Mon, 9 Sep 2024 11:00:55 +0200 Subject: [PATCH 097/187] fix(DashboardValidatorManagementModal): anchor footer at the bottom See: BEDS-386 --- frontend/.vscode/settings.json | 25 
++++++++++--------- .../dashboard/ValidatorManagementModal.vue | 1 + 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index 2ddbc4e8a..a97dbcf2a 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -1,17 +1,18 @@ { "conventionalCommits.scopes": [ - "checkout", - "ci", - "customFetch", - "DashboardChartSummaryChartFilter", - "DashboardGroupManagementModal", - "eslint", - "git", - "i18n", - "mainHeader", - "qrCode", - "vscode" -], + "checkout", + "ci", + "customFetch", + "DashboardChartSummaryChartFilter", + "DashboardGroupManagementModal", + "eslint", + "git", + "i18n", + "mainHeader", + "qrCode", + "vscode", + "DashboardValidatorManagmentModal" + ], "editor.codeActionsOnSave": { "source.fixAll.eslint": "always" }, diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/ValidatorManagementModal.vue index f64946de6..d682171a8 100644 --- a/frontend/components/dashboard/ValidatorManagementModal.vue +++ b/frontend/components/dashboard/ValidatorManagementModal.vue @@ -645,6 +645,7 @@ const premiumLimit = computed( display: flex; flex-direction: column; overflow-y: hidden; + justify-content: space-between; :deep(.p-datatable-wrapper) { flex-grow: 1; From 67b263cf5acf39e1f77d5ed7d76523d919672388 Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Mon, 9 Sep 2024 12:52:00 +0200 Subject: [PATCH 098/187] feat(DashboardValidatorManagmentModal): add `validator status` to the `expendable` See: BEDS-386 --- .../components/dashboard/ValidatorManagementModal.vue | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/ValidatorManagementModal.vue index d682171a8..94762a647 100644 --- a/frontend/components/dashboard/ValidatorManagementModal.vue +++ b/frontend/components/dashboard/ValidatorManagementModal.vue @@ -553,6 +553,15 @@ const 
premiumLimit = computed( " /> +
+
+ {{ $t("dashboard.validator.col.status") }} +
+ +
{{ $t("dashboard.validator.col.withdrawal_credential") }} From 1c543c95ffd4e18074eaefc988ad13a4d191de34 Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Mon, 9 Sep 2024 13:29:39 +0200 Subject: [PATCH 099/187] refactor(DashboardValidatorManagmentModal): rename component to match the file structure --- ...rManagementModal.vue => DashboardValidatorManagementModal.vue} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename frontend/components/dashboard/{ValidatorManagementModal.vue => DashboardValidatorManagementModal.vue} (100%) diff --git a/frontend/components/dashboard/ValidatorManagementModal.vue b/frontend/components/dashboard/DashboardValidatorManagementModal.vue similarity index 100% rename from frontend/components/dashboard/ValidatorManagementModal.vue rename to frontend/components/dashboard/DashboardValidatorManagementModal.vue From c8d0e0f993ebc902d9d433c1a9f7e7c68feb83be Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 12:14:42 +0000 Subject: [PATCH 100/187] feat(notifications): store queued notifications in network db --- backend/pkg/commons/db/subscriptions.go | 4 +- backend/pkg/notification/notifications.go | 89 +++++++++++------------ 2 files changed, 46 insertions(+), 47 deletions(-) diff --git a/backend/pkg/commons/db/subscriptions.go b/backend/pkg/commons/db/subscriptions.go index 3edbc3d5f..639f70c8b 100644 --- a/backend/pkg/commons/db/subscriptions.go +++ b/backend/pkg/commons/db/subscriptions.go @@ -261,8 +261,8 @@ func GetSubscriptions(filter GetSubscriptionsFilter) ([]*types.Subscription, err } // UpdateSubscriptionsLastSent updates `last_sent_ts` column of the `users_subscriptions` table. 
-func UpdateSubscriptionsLastSent(subscriptionIDs []uint64, sent time.Time, epoch uint64, useDB *sqlx.DB) error { - _, err := useDB.Exec(` +func UpdateSubscriptionsLastSent(subscriptionIDs []uint64, sent time.Time, epoch uint64) error { + _, err := FrontendWriterDB.Exec(` UPDATE users_subscriptions SET last_sent_ts = TO_TIMESTAMP($1), last_sent_epoch = $2 WHERE id = ANY($3)`, sent.Unix(), epoch, pq.Array(subscriptionIDs)) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index a8a6209eb..7b148d0d5 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -34,7 +34,6 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" - "github.com/jmoiron/sqlx" "github.com/lib/pq" "github.com/rocket-pool/rocketpool-go/utils/eth" "golang.org/x/text/cases" @@ -126,7 +125,7 @@ func notificationCollector() { break } - queueNotifications(notifications, db.FrontendWriterDB) // this caused the collected notifications to be queued and sent + queueNotifications(notifications) // this caused the collected notifications to be queued and sent // Network DB Notifications (user related, must only run on one instance ever!!!!) 
if utils.Config.Notifications.UserDBNotifications { @@ -139,7 +138,7 @@ func notificationCollector() { continue } - queueNotifications(userNotifications, db.FrontendWriterDB) + queueNotifications(userNotifications) } log.InfoWithFields(log.Fields{"notifications": len(notifications), "duration": time.Since(start), "epoch": epoch}, "notifications completed") @@ -177,12 +176,12 @@ func notificationSender() { } log.Infof("lock obtained") - err = dispatchNotifications(db.FrontendWriterDB) + err = dispatchNotifications() if err != nil { log.Error(err, "error dispatching notifications", 0) } - err = garbageCollectNotificationQueue(db.FrontendWriterDB) + err = garbageCollectNotificationQueue() if err != nil { log.Error(err, "error garbage collecting notification queue", 0) } @@ -464,20 +463,20 @@ func collectUserDbNotifications(epoch uint64) (types.NotificationsPerUserId, err return notificationsByUserID, nil } -func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) { +func queueNotifications(notificationsByUserID types.NotificationsPerUserId) { subByEpoch := map[uint64][]uint64{} - err := queueEmailNotifications(notificationsByUserID, useDB) + err := queueEmailNotifications(notificationsByUserID) if err != nil { log.Error(err, "error queuing email notifications", 0) } - err = queuePushNotification(notificationsByUserID, useDB) + err = queuePushNotification(notificationsByUserID) if err != nil { log.Error(err, "error queuing push notifications", 0) } - err = queueWebhookNotifications(notificationsByUserID, useDB) + err = queueWebhookNotifications(notificationsByUserID) if err != nil { log.Error(err, "error queuing webhook notifications", 0) } @@ -498,7 +497,7 @@ func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useD // obsolete as notifications are anyway sent on a per-epoch basis for epoch, subIDs := range subByEpoch { // update that we've queued the subscription (last sent rather means last queued) - err := 
db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch, useDB) + err := db.UpdateSubscriptionsLastSent(subIDs, time.Now(), epoch) if err != nil { log.Error(err, "error updating sent-time of sent notifications", 0) metrics.Errors.WithLabelValues("notifications_updating_sent_time").Inc() @@ -537,23 +536,23 @@ func queueNotifications(notificationsByUserID types.NotificationsPerUserId, useD } } -func dispatchNotifications(useDB *sqlx.DB) error { - err := sendEmailNotifications(useDB) +func dispatchNotifications() error { + err := sendEmailNotifications() if err != nil { return fmt.Errorf("error sending email notifications, err: %w", err) } - err = sendPushNotifications(useDB) + err = sendPushNotifications() if err != nil { return fmt.Errorf("error sending push notifications, err: %w", err) } - err = sendWebhookNotifications(useDB) + err = sendWebhookNotifications() if err != nil { return fmt.Errorf("error sending webhook notifications, err: %w", err) } - err = sendDiscordNotifications(useDB) + err = sendDiscordNotifications() if err != nil { return fmt.Errorf("error sending webhook discord notifications, err: %w", err) } @@ -562,8 +561,8 @@ func dispatchNotifications(useDB *sqlx.DB) error { } // garbageCollectNotificationQueue deletes entries from the notification queue that have been processed -func garbageCollectNotificationQueue(useDB *sqlx.DB) error { - rows, err := useDB.Exec(`DELETE FROM notification_queue WHERE (sent < now() - INTERVAL '30 minutes') OR (created < now() - INTERVAL '1 hour')`) +func garbageCollectNotificationQueue() error { + rows, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE (sent < now() - INTERVAL '30 minutes') OR (created < now() - INTERVAL '1 hour')`) if err != nil { return fmt.Errorf("error deleting from notification_queue %w", err) } @@ -583,7 +582,7 @@ func getNetwork() string { return "" } -func queuePushNotification(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { +func 
queuePushNotification(notificationsByUserID types.NotificationsPerUserId) error { userIDs := slices.Collect(maps.Keys(notificationsByUserID)) tokensByUserID, err := GetUserPushTokenByIds(userIDs) @@ -636,7 +635,7 @@ func queuePushNotification(notificationsByUserID types.NotificationsPerUserId, u Messages: batch, } - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'push', $2)`, time.Now(), transitPushContent) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'push', $2)`, time.Now(), transitPushContent) if err != nil { log.Error(err, "error writing transit push notification to db", 0) return @@ -646,10 +645,10 @@ func queuePushNotification(notificationsByUserID types.NotificationsPerUserId, u return nil } -func sendPushNotifications(useDB *sqlx.DB) error { +func sendPushNotifications() error { var notificationQueueItem []types.TransitPush - err := useDB.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -679,7 +678,7 @@ func sendPushNotifications(useDB *sqlx.DB) error { metrics.NotificationsSent.WithLabelValues("push", "200").Add(float64(len(n.Content.Messages))) } - _, err = useDB.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) if err != nil { return fmt.Errorf("error updating sent status for push notification with id: %v, err: %w", n.Id, err) } @@ -688,7 +687,7 @@ func sendPushNotifications(useDB *sqlx.DB) error { return nil } -func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { +func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId) error { userIDs := slices.Collect(maps.Keys(notificationsByUserID)) emailsByUserID, err := GetUserEmailsByIds(userIDs) @@ -777,7 +776,7 @@ func 
queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, Attachments: attachments, } - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'email', $2)`, time.Now(), transitEmailContent) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES ($1, 'email', $2)`, time.Now(), transitEmailContent) if err != nil { log.Error(err, "error writing transit email to db", 0) } @@ -786,10 +785,10 @@ func queueEmailNotifications(notificationsByUserID types.NotificationsPerUserId, return nil } -func sendEmailNotifications(useDb *sqlx.DB) error { +func sendEmailNotifications() error { var notificationQueueItem []types.TransitEmail - err := useDb.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -812,7 +811,7 @@ func sendEmailNotifications(useDb *sqlx.DB) error { metrics.NotificationsSent.WithLabelValues("email", "200").Inc() } } - _, err = useDb.Exec(`UPDATE notification_queue set sent = now() where id = $1`, n.Id) + _, err = db.WriterDb.Exec(`UPDATE notification_queue set sent = now() where id = $1`, n.Id) if err != nil { return fmt.Errorf("error updating sent status for email notification with id: %v, err: %w", n.Id, err) } @@ -820,10 +819,10 @@ func sendEmailNotifications(useDb *sqlx.DB) error { return nil } -func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserId, useDB *sqlx.DB) error { +func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserId) error { for userID, userNotifications := range notificationsByUserID { var webhooks []types.UserWebhook - err := useDB.Select(&webhooks, ` + err := db.FrontendWriterDB.Select(&webhooks, ` SELECT id, user_id, @@ -861,7 +860,7 @@ func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserI if len(notifications) > 0 { // reset Retries if w.Retries > 5 && w.LastSent.Valid && 
w.LastSent.Time.Add(time.Hour).Before(time.Now()) { - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = 0 WHERE id = $1;`, w.ID) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = 0 WHERE id = $1;`, w.ID) if err != nil { log.Error(err, "error updating users_webhooks table; setting retries to zero", 0) continue @@ -938,7 +937,7 @@ func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserI } // process notifs for _, n := range notifs { - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), $1, $2);`, n.Channel, n.Content) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), $1, $2);`, n.Channel, n.Content) if err != nil { log.Error(err, "error inserting into webhooks_queue", 0) } else { @@ -948,7 +947,7 @@ func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserI // process discord notifs for _, dNotifs := range discordNotifMap { for _, n := range dNotifs { - _, err = useDB.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), 'webhook_discord', $1);`, n) + _, err = db.WriterDb.Exec(`INSERT INTO notification_queue (created, channel, content) VALUES (now(), 'webhook_discord', $1);`, n) if err != nil { log.Error(err, "error inserting into webhooks_queue (discord)", 0) continue @@ -961,10 +960,10 @@ func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserI return nil } -func sendWebhookNotifications(useDB *sqlx.DB) error { +func sendWebhookNotifications() error { var notificationQueueItem []types.TransitWebhook - err := useDB.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -981,7 +980,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { for _, n := range notificationQueueItem { // do not retry after 5 attempts if n.Content.Webhook.Retries > 5 { - _, err := 
db.FrontendWriterDB.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) + _, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) if err != nil { return fmt.Errorf("error deleting from notification queue: %w", err) } @@ -997,7 +996,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { _, err = url.Parse(n.Content.Webhook.Url) if err != nil { - _, err := db.FrontendWriterDB.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) + _, err := db.WriterDb.Exec(`DELETE FROM notification_queue WHERE id = $1`, n.Id) if err != nil { return fmt.Errorf("error deleting from notification queue: %w", err) } @@ -1017,14 +1016,14 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { } defer resp.Body.Close() - _, err = useDB.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() WHERE id = $1`, n.Id) if err != nil { log.Error(err, "error updating notification_queue table", 0) return } if resp != nil && resp.StatusCode < 400 { - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = 0, last_sent = now() WHERE id = $1;`, n.Content.Webhook.ID) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = 0, last_sent = now() WHERE id = $1;`, n.Content.Webhook.ID) if err != nil { log.Error(err, "error updating users_webhooks table", 0) return @@ -1042,7 +1041,7 @@ func sendWebhookNotifications(useDB *sqlx.DB) error { errResp.Body = string(b) } - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp) if err != nil { log.Error(err, "error updating users_webhooks table", 0) return @@ -1053,10 +1052,10 @@ func 
sendWebhookNotifications(useDB *sqlx.DB) error { return nil } -func sendDiscordNotifications(useDB *sqlx.DB) error { +func sendDiscordNotifications() error { var notificationQueueItem []types.TransitDiscord - err := useDB.Select(¬ificationQueueItem, `SELECT + err := db.WriterDb.Select(¬ificationQueueItem, `SELECT id, created, sent, @@ -1077,7 +1076,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { for _, n := range notificationQueueItem { // purge the event from existence if the retry counter is over 5 if n.Content.Webhook.Retries > 5 { - _, err = db.FrontendWriterDB.Exec(`DELETE FROM notification_queue where id = $1`, n.Id) + _, err = db.WriterDb.Exec(`DELETE FROM notification_queue where id = $1`, n.Id) if err != nil { log.Warnf("failed to delete notification from queue: %v", err) } @@ -1097,7 +1096,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { go func(webhook types.UserWebhook, reqs []types.TransitDiscord) { defer func() { // update retries counters in db based on end result - _, err = useDB.Exec(`UPDATE users_webhooks SET retries = $1, last_sent = now() WHERE id = $2;`, webhook.Retries, webhook.ID) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = $1, last_sent = now() WHERE id = $2;`, webhook.Retries, webhook.ID) if err != nil { log.Warnf("failed to update retries counter to %v for webhook %v: %v", webhook.Retries, webhook.ID, err) } @@ -1107,7 +1106,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { for _, req := range reqs { ids = append(ids, req.Id) } - _, err = db.FrontendWriterDB.Exec(`UPDATE notification_queue SET sent = now() where id = ANY($1)`, pq.Array(ids)) + _, err = db.WriterDb.Exec(`UPDATE notification_queue SET sent = now() where id = ANY($1)`, pq.Array(ids)) if err != nil { log.Warnf("failed to update sent for notifcations in queue: %v", err) } @@ -1161,7 +1160,7 @@ func sendDiscordNotifications(useDB *sqlx.DB) error { } else { log.Error(nil, "error pushing discord webhook", 0, 
map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) } - _, err = useDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) + _, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET request = $2, response = $3 WHERE id = $1;`, webhook.ID, reqs[i].Content.DiscordRequest, errResp) if err != nil { log.Error(err, "error storing failure data in users_webhooks table", 0) } From 1b960a4667ab5951bbe55a91a1020a1a831565fd Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 9 Sep 2024 15:11:15 +0200 Subject: [PATCH 101/187] ci: fix docker-image-tags (#844) BEDS-90 --- .github/workflows/frontend-publish-docker.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/frontend-publish-docker.yml b/.github/workflows/frontend-publish-docker.yml index 1ee6c4391..4826823ce 100644 --- a/.github/workflows/frontend-publish-docker.yml +++ b/.github/workflows/frontend-publish-docker.yml @@ -46,7 +46,9 @@ jobs: uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - tags: ${{ env.BEACONCHAIN_VERSION }} + tags: | + ${{ env.BEACONCHAIN_VERSION }} + type=ref,event=branch # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. 
From 0856faca8735663457b4be608f6edc6633f11d7e Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 9 Sep 2024 15:14:10 +0200 Subject: [PATCH 102/187] ci: fix docker-image-tags (#845) BEDS-90 --- .github/workflows/backend-publish-docker.yml | 4 +++- .github/workflows/frontend-publish-docker.yml | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/backend-publish-docker.yml b/.github/workflows/backend-publish-docker.yml index 393585908..adfb81822 100644 --- a/.github/workflows/backend-publish-docker.yml +++ b/.github/workflows/backend-publish-docker.yml @@ -46,7 +46,9 @@ jobs: uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} - tags: ${{ env.BEACONCHAIN_VERSION }} + tags: | + ${{ env.BEACONCHAIN_VERSION }} + type=ref,event=branch # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. 
diff --git a/.github/workflows/frontend-publish-docker.yml b/.github/workflows/frontend-publish-docker.yml index 4826823ce..8c85468f9 100644 --- a/.github/workflows/frontend-publish-docker.yml +++ b/.github/workflows/frontend-publish-docker.yml @@ -32,7 +32,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Set version run: | - echo "BEACONCHAIN_VERSION=$(git describe --always --tags)" >> "$GITHUB_ENV" + echo "BEACONCHAIN_VERSION=$(TZ=UTC0 git show --quiet --date='format-local:%Y%m%d%H%M%S' --format="%cd" $GITHUB_SHA)-$(git describe $GITHUB_SHA --always --tags)" >> "$GITHUB_ENV" # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. - name: Log in to the Container registry uses: docker/login-action@65b78e6e13532edd9afa3aa52ac7964289d1a9c1 From 64e7c930974ece7113f83211a6f0de3eeac8dc84 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:44:44 +0200 Subject: [PATCH 103/187] BEDS-306: handle clean shutdown cleanly --- backend/cmd/api/main.go | 2 +- backend/cmd/monitoring/main.go | 2 +- backend/pkg/api/data_access/healthz.go | 18 +++- backend/pkg/commons/log/log.go | 32 ++----- backend/pkg/monitoring/constants/main.go | 2 + backend/pkg/monitoring/monitoring.go | 7 ++ backend/pkg/monitoring/services/base.go | 31 ++----- .../services/clean_shutdown_spam.go | 86 +++++++++++++++++++ .../services/clickhouse_rollings.go | 4 +- .../monitoring/services/timeout_detector.go | 6 +- 10 files changed, 133 insertions(+), 57 deletions(-) create mode 100644 backend/pkg/monitoring/services/clean_shutdown_spam.go diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index 54f6ccf1c..4821cfd0a 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -52,7 +52,6 @@ func Run() { // enable light-weight db connection monitoring 
monitoring.Init(false) monitoring.Start() - defer monitoring.Stop() } var dataAccessor dataaccess.DataAccessor @@ -103,6 +102,7 @@ func Run() { if srv != nil { shutDownCtx, cancelShutDownCtx := context.WithTimeout(context.Background(), 10*time.Second) defer cancelShutDownCtx() + monitoring.Stop() err = srv.Shutdown(shutDownCtx) if err != nil { log.Error(err, "error shutting down server", 0) diff --git a/backend/cmd/monitoring/main.go b/backend/cmd/monitoring/main.go index 57df9f824..a4bef3a60 100644 --- a/backend/cmd/monitoring/main.go +++ b/backend/cmd/monitoring/main.go @@ -55,8 +55,8 @@ func Run() { monitoring.Init(true) monitoring.Start() - defer monitoring.Stop() // gotta wait forever utils.WaitForCtrlC() + monitoring.Stop() } diff --git a/backend/pkg/api/data_access/healthz.go b/backend/pkg/api/data_access/healthz.go index 525ed0db7..303184532 100644 --- a/backend/pkg/api/data_access/healthz.go +++ b/backend/pkg/api/data_access/healthz.go @@ -4,6 +4,7 @@ import ( "context" "slices" + ch "github.com/ClickHouse/clickhouse-go/v2" "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" @@ -19,7 +20,17 @@ func (d *DataAccessService) GetHealthz(ctx context.Context, showAll bool) types. var results []types.HealthzResult var response types.HealthzData query := ` - with active_reports as ( + with clean_shutdown_events as ( + SELECT + emitter, + toNullable(inserted_at) as inserted_at + FROM + status_reports + WHERE + deployment_type = {deployment_type:String} + AND inserted_at >= now() - interval 1 days + AND event_id = {clean_shutdown_event_id:String} + ), active_reports as ( SELECT event_id, emitter, @@ -31,7 +42,8 @@ func (d *DataAccessService) GetHealthz(ctx context.Context, showAll bool) types. status, metadata FROM status_reports - WHERE expires_at > now() and deployment_type = ? 
+ LEFT JOIN clean_shutdown_events cse ON status_reports.emitter = clean_shutdown_events.emitter + WHERE expires_at > now() and deployment_type = {deployment_type:String} and (status_reports.inserted_at < cse.inserted_at or cse.inserted_at is null) ORDER BY event_id ASC, emitter ASC, @@ -99,7 +111,7 @@ func (d *DataAccessService) GetHealthz(ctx context.Context, showAll bool) types. response.Reports = make(map[string][]types.HealthzResult) response.ReportingUUID = utils.GetUUID() response.DeploymentType = utils.Config.DeploymentType - err := db.ClickHouseReader.SelectContext(ctx, &results, query, utils.Config.DeploymentType) + err := db.ClickHouseReader.SelectContext(ctx, &results, query, ch.Named("deployment_type", utils.Config.DeploymentType), ch.Named("clean_shutdown_event_id", constants.CleanShutdownEvent)) if err != nil { response.Reports["response_error"] = []types.HealthzResult{ { diff --git a/backend/pkg/commons/log/log.go b/backend/pkg/commons/log/log.go index dc9c11b0b..60684f6cb 100644 --- a/backend/pkg/commons/log/log.go +++ b/backend/pkg/commons/log/log.go @@ -36,12 +36,7 @@ func Infof(format string, args ...interface{}) { } func InfoWithFields(additionalInfos Fields, msg string) { - logFields := logrus.NewEntry(logrus.New()) - for name, info := range additionalInfos { - logFields = logFields.WithField(name, info) - } - - logFields.Info(msg) + logrus.WithFields(additionalInfos).Info(msg) } func Warn(args ...interface{}) { @@ -53,12 +48,7 @@ func Warnf(format string, args ...interface{}) { } func WarnWithFields(additionalInfos Fields, msg string) { - logFields := logrus.NewEntry(logrus.New()) - for name, info := range additionalInfos { - logFields = logFields.WithField(name, info) - } - - logFields.Warn(msg) + logrus.WithFields(additionalInfos).Warn(msg) } func Tracef(format string, args ...interface{}) { @@ -66,21 +56,11 @@ func Tracef(format string, args ...interface{}) { } func TraceWithFields(additionalInfos Fields, msg string) { - logFields := 
logrus.NewEntry(logrus.New()) - for name, info := range additionalInfos { - logFields = logFields.WithField(name, info) - } - - logFields.Trace(msg) + logrus.WithFields(additionalInfos).Trace(msg) } func DebugWithFields(additionalInfos Fields, msg string) { - logFields := logrus.NewEntry(logrus.New()) - for name, info := range additionalInfos { - logFields = logFields.WithField(name, info) - } - - logFields.Debug(msg) + logrus.WithFields(additionalInfos).Debug(msg) } func Debugf(format string, args ...interface{}) { @@ -96,7 +76,7 @@ func logErrorInfo(err error, callerSkip int, additionalInfos ...Fields) *logrus. } pc, fullFilePath, line, ok := runtime.Caller(callerSkip + 2) if ok { - logFields = logFields.WithFields(logrus.Fields{ + logFields = logFields.WithFields(Fields{ "_file": filepath.Base(fullFilePath), "_function": runtime.FuncForPC(pc).Name(), "_line": line, @@ -152,4 +132,4 @@ func logErrorInfo(err error, callerSkip int, additionalInfos ...Fields) *logrus. return logFields } -type Fields map[string]interface{} +type Fields = logrus.Fields diff --git a/backend/pkg/monitoring/constants/main.go b/backend/pkg/monitoring/constants/main.go index 7ffa891c2..9e20f037b 100644 --- a/backend/pkg/monitoring/constants/main.go +++ b/backend/pkg/monitoring/constants/main.go @@ -11,3 +11,5 @@ const ( Failure StatusType = "failure" Default time.Duration = -1 * time.Second ) + +const CleanShutdownEvent = "clean_shutdown" diff --git a/backend/pkg/monitoring/monitoring.go b/backend/pkg/monitoring/monitoring.go index f814d3a66..b24c3cb9a 100644 --- a/backend/pkg/monitoring/monitoring.go +++ b/backend/pkg/monitoring/monitoring.go @@ -2,9 +2,11 @@ package monitoring import ( "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" + 
"github.com/gobitfly/beaconchain/pkg/monitoring/constants" "github.com/gobitfly/beaconchain/pkg/monitoring/services" ) @@ -33,6 +35,7 @@ func Init(full bool) { &services.ServiceClickhouseRollings{}, &services.ServiceClickhouseEpoch{}, &services.ServiceTimeoutDetector{}, + &services.CleanShutdownSpamDetector{}, ) } @@ -42,13 +45,17 @@ func Init(full bool) { } func Start() { + log.Infof("starting monitoring services") for _, service := range monitoredServices { service.Start() } } func Stop() { + log.Infof("stopping monitoring services") for _, service := range monitoredServices { service.Stop() } + // this prevents status reports that werent shut down cleanly from triggering alerts + services.NewStatusReport(constants.CleanShutdownEvent, constants.Default, constants.Default)(constants.Success, nil) } diff --git a/backend/pkg/monitoring/services/base.go b/backend/pkg/monitoring/services/base.go index 8bea11b47..9f5c6e3c3 100644 --- a/backend/pkg/monitoring/services/base.go +++ b/backend/pkg/monitoring/services/base.go @@ -3,11 +3,8 @@ package services import ( "context" "fmt" - "os" - "os/signal" "sync" "sync/atomic" - "syscall" "time" "github.com/gobitfly/beaconchain/pkg/commons/db" @@ -45,31 +42,12 @@ func (s *ServiceBase) Stop() { s.wg.Wait() } -var isShuttingDown atomic.Bool -var once sync.Once - -func autoGracefulStop() { - c := make(chan os.Signal, 1) - signal.Notify(c, os.Interrupt, syscall.SIGINT, syscall.SIGTERM) - <-c - isShuttingDown.Store(true) -} - func NewStatusReport(id string, timeout time.Duration, check_interval time.Duration) func(status constants.StatusType, metadata map[string]string) { runId := uuid.New().String() - // run if it hasnt started yet - once.Do(func() { go autoGracefulStop() }) return func(status constants.StatusType, metadata map[string]string) { // acquire snowflake synchronously flake := utils.GetSnowflake() - go func() { - // check if we are alive - if isShuttingDown.Load() { - log.Info("shutting down, not reporting status") - 
return - } - if metadata == nil { metadata = make(map[string]string) } @@ -90,6 +68,15 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat if check_interval >= 5*time.Minute { expires_at = timeouts_at.Add(check_interval) } + log.TraceWithFields(log.Fields{ + "emitter": id, + "event_id": utils.GetUUID(), + "deployment_type": utils.Config.DeploymentType, + "insert_id": flake, + "expires_at": expires_at, + "timeouts_at": timeouts_at, + "metadata": metadata, + }, "sending status report") var err error if db.ClickHouseNativeWriter != nil { err = db.ClickHouseNativeWriter.AsyncInsert( diff --git a/backend/pkg/monitoring/services/clean_shutdown_spam.go b/backend/pkg/monitoring/services/clean_shutdown_spam.go new file mode 100644 index 000000000..0dcf01391 --- /dev/null +++ b/backend/pkg/monitoring/services/clean_shutdown_spam.go @@ -0,0 +1,86 @@ +package services + +import ( + "context" + "encoding/json" + "strconv" + "time" + + "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/utils" + "github.com/gobitfly/beaconchain/pkg/monitoring/constants" +) + +type CleanShutdownSpamDetector struct { + ServiceBase +} + +func (s *CleanShutdownSpamDetector) Start() { + if !s.running.CompareAndSwap(false, true) { + // already running, return error + return + } + s.wg.Add(1) + go s.internalProcess() +} + +func (s *CleanShutdownSpamDetector) internalProcess() { + defer s.wg.Done() + s.runChecks() + for { + select { + case <-s.ctx.Done(): + return + case <-time.After(30 * time.Second): + s.runChecks() + } + } +} + +func (s *CleanShutdownSpamDetector) runChecks() { + id := "monitoring_clean_shutdown_spam" + r := NewStatusReport(id, constants.Default, 30*time.Second) + r(constants.Running, nil) + if db.ClickHouseReader == nil { + r(constants.Failure, map[string]string{"error": "clickhouse reader is nil"}) + // ignore + return + } + log.Tracef("checking clean 
shutdown spam") + + query := ` + SELECT + emitter + FROM + status_reports + WHERE + deployment_type = ? + AND inserted_at >= now() - interval 5 minutes + AND event_id = ? + ` + ctx, cancel := context.WithTimeout(s.ctx, 15*time.Second) + defer cancel() + var emitters []string + err := db.ClickHouseReader.SelectContext(ctx, &emitters, query, utils.Config.DeploymentType, constants.CleanShutdownEvent) + if err != nil { + r(constants.Failure, map[string]string{"error": err.Error()}) + return + } + threshold := 10 + md := map[string]string{ + "count": strconv.Itoa(len(emitters)), + "threshold": strconv.Itoa(threshold), + } + if len(emitters) > threshold { + payload, err := json.Marshal(emitters) + if err != nil { + r(constants.Failure, map[string]string{"error": err.Error()}) + return + } + md["emitters"] = string(payload) + r(constants.Failure, md) + return + } + r(constants.Success, md) +} diff --git a/backend/pkg/monitoring/services/clickhouse_rollings.go b/backend/pkg/monitoring/services/clickhouse_rollings.go index 259bf5a44..8bab09dc1 100644 --- a/backend/pkg/monitoring/services/clickhouse_rollings.go +++ b/backend/pkg/monitoring/services/clickhouse_rollings.go @@ -71,7 +71,7 @@ func (s *ServiceClickhouseRollings) runChecks() { err := db.ClickHouseReader.GetContext(ctx, &tsEpochTable, ` SELECT max(epoch_timestamp) - FROM holesky.validator_dashboard_data_epoch`, + FROM validator_dashboard_data_epoch`, ) if err != nil { r(constants.Failure, map[string]string{"error": err.Error()}) @@ -81,7 +81,7 @@ func (s *ServiceClickhouseRollings) runChecks() { err = db.ClickHouseReader.GetContext(ctx, &epochRollingTable, fmt.Sprintf(` SELECT max(epoch_end) - FROM holesky.validator_dashboard_data_rolling_%s`, + FROM validator_dashboard_data_rolling_%s`, rolling, ), ) diff --git a/backend/pkg/monitoring/services/timeout_detector.go b/backend/pkg/monitoring/services/timeout_detector.go index 720bb80dc..83f85ccd3 100644 --- a/backend/pkg/monitoring/services/timeout_detector.go +++ 
b/backend/pkg/monitoring/services/timeout_detector.go @@ -61,7 +61,7 @@ func (s *ServiceTimeoutDetector) runChecks() { status, metadata FROM status_reports - WHERE expires_at > now() and deployment_type = ? + WHERE expires_at > now() and deployment_type = ? and emitter not in (select distinct emitter from status_reports where event_id = ? and inserted_at > now() - interval 1 days) ORDER BY event_id ASC, emitter ASC, @@ -87,6 +87,7 @@ func (s *ServiceTimeoutDetector) runChecks() { ) SELECT event_id, + emitter, status, inserted_at, expires_at, @@ -101,13 +102,14 @@ func (s *ServiceTimeoutDetector) runChecks() { defer cancel() var victims []struct { EventID string `db:"event_id"` + Emitter string `db:"emitter"` Status string `db:"status"` InsertedAt time.Time `db:"inserted_at"` ExpiresAt time.Time `db:"expires_at"` TimeoutsAt time.Time `db:"timeouts_at"` Metadata map[string]string `db:"metadata"` } - err := db.ClickHouseReader.SelectContext(ctx, &victims, query, utils.Config.DeploymentType) + err := db.ClickHouseReader.SelectContext(ctx, &victims, query, utils.Config.DeploymentType, constants.CleanShutdownEvent) if err != nil { r(constants.Failure, map[string]string{"error": err.Error()}) return From 43cfa46b96637dfd458cf5b08d062560d3c71f5c Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 15:47:02 +0200 Subject: [PATCH 104/187] BEDS-306: api: trigger clean shutdown event even if not listening --- backend/cmd/api/main.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/backend/cmd/api/main.go b/backend/cmd/api/main.go index 4821cfd0a..90ce205f3 100644 --- a/backend/cmd/api/main.go +++ b/backend/cmd/api/main.go @@ -97,12 +97,11 @@ func Run() { }() utils.WaitForCtrlC() - + monitoring.Stop() // this will emit a clean shutdown event log.Info("shutting down server") if srv != nil { shutDownCtx, cancelShutDownCtx := context.WithTimeout(context.Background(), 10*time.Second) defer 
cancelShutDownCtx() - monitoring.Stop() err = srv.Shutdown(shutDownCtx) if err != nil { log.Error(err, "error shutting down server", 0) From 0f5097e37e85a498cffbd568e5785b52b07989d4 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:07:49 +0200 Subject: [PATCH 105/187] BEDS-306: monitoring: acquire current time with snowflake --- backend/pkg/commons/utils/uuid.go | 4 +++- backend/pkg/monitoring/services/base.go | 5 +++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/backend/pkg/commons/utils/uuid.go b/backend/pkg/commons/utils/uuid.go index 6932f1c76..5142a1143 100644 --- a/backend/pkg/commons/utils/uuid.go +++ b/backend/pkg/commons/utils/uuid.go @@ -6,6 +6,7 @@ import ( "github.com/bwmarrin/snowflake" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/google/uuid" + "golang.org/x/exp/rand" ) // uuid that you can get - gets set to a random value on startup/first read @@ -30,7 +31,8 @@ func GetSnowflake() int64 { return v.(*snowflake.Node).Generate().Int64() } - node, err := snowflake.NewNode(1) + nodeId := rand.Int63() & 0xFF + node, err := snowflake.NewNode(nodeId) if err != nil { log.Fatal(err, "snowflake generator failed to start", 0) return 0 diff --git a/backend/pkg/monitoring/services/base.go b/backend/pkg/monitoring/services/base.go index 9f5c6e3c3..549b4d67f 100644 --- a/backend/pkg/monitoring/services/base.go +++ b/backend/pkg/monitoring/services/base.go @@ -47,6 +47,7 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat return func(status constants.StatusType, metadata map[string]string) { // acquire snowflake synchronously flake := utils.GetSnowflake() + now := time.Now() go func() { if metadata == nil { metadata = make(map[string]string) @@ -60,9 +61,9 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - 
timeouts_at := time.Now().Add(1 * time.Minute) + timeouts_at := now.Add(1 * time.Minute) if timeout != constants.Default { - timeouts_at = time.Now().Add(timeout) + timeouts_at = now.Add(timeout) } expires_at := timeouts_at.Add(5 * time.Minute) if check_interval >= 5*time.Minute { From 86da74c466e8e61e90e1b3e1bf7cc7744cc2963a Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 10:27:46 +0000 Subject: [PATCH 106/187] fix(notifications): expand args in sql query --- backend/pkg/notification/db.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 4363c12f7..6e19acf3b 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -58,7 +58,7 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las } subMap := make(map[string][]types.Subscription, 0) - err = db.FrontendWriterDB.Select(&subs, query, args) + err = db.FrontendWriterDB.Select(&subs, query, args...) 
if err != nil { return nil, err } @@ -69,6 +69,7 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las } subMap[sub.EventFilter] = append(subMap[sub.EventFilter], sub) } + return subMap, nil } From 4d5ed1eda53796380c15cc0a80135840d3f11b86 Mon Sep 17 00:00:00 2001 From: Patrick Date: Tue, 10 Sep 2024 13:08:12 +0200 Subject: [PATCH 107/187] chore(env): add env-var to distinguish between deployment-types (#850) The variable will be set to `production` or `staging` corresponding to the deployment-type --- frontend/.env-example | 1 + frontend/nuxt.config.ts | 1 + 2 files changed, 2 insertions(+) diff --git a/frontend/.env-example b/frontend/.env-example index 563b8dc92..8e5b8de1b 100644 --- a/frontend/.env-example +++ b/frontend/.env-example @@ -11,4 +11,5 @@ NUXT_PUBLIC_V1_DOMAIN: "" NUXT_PUBLIC_LOG_FILE: "" NUXT_PUBLIC_CHAIN_ID_BY_DEFAULT: "" NUXT_PUBLIC_MAINTENANCE_TS: "1717700652" +NUXT_PUBLIC_DEPLOYMENT_TYPE: "development" PRIVATE_SSR_SECRET: "" diff --git a/frontend/nuxt.config.ts b/frontend/nuxt.config.ts index 8c932276c..81bb64d3a 100644 --- a/frontend/nuxt.config.ts +++ b/frontend/nuxt.config.ts @@ -78,6 +78,7 @@ export default defineNuxtConfig({ apiClient: process.env.PUBLIC_API_CLIENT, apiKey: process.env.PUBLIC_API_KEY, chainIdByDefault: process.env.PUBLIC_CHAIN_ID_BY_DEFAULT, + deploymentType: process.env.PUBLIC_DEPLOYMENT_TYPE, domain: process.env.PUBLIC_DOMAIN, gitVersion, legacyApiClient: process.env.PUBLIC_LEGACY_API_CLIENT, From 454534a2df918840e113c46b120e8f38a8e6c9d0 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:09:27 +0000 Subject: [PATCH 108/187] chore(notifications): add some util funcs --- backend/cmd/misc/main.go | 60 ++++++++++++++--------- backend/pkg/commons/types/frontend.go | 4 +- backend/pkg/notification/db.go | 6 ++- backend/pkg/notification/notifications.go | 16 ++++-- 4 files changed, 57 insertions(+), 29 deletions(-) diff --git 
a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index d7b9b77a1..8af9a3499 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -32,6 +32,7 @@ import ( edb "github.com/gobitfly/beaconchain/pkg/exporter/db" "github.com/gobitfly/beaconchain/pkg/exporter/modules" "github.com/gobitfly/beaconchain/pkg/exporter/services" + "github.com/gobitfly/beaconchain/pkg/notification" _ "github.com/jackc/pgx/v5/stdlib" "github.com/pkg/errors" utilMath "github.com/protolambda/zrnt/eth2/util/math" @@ -75,7 +76,7 @@ func Run() { } configPath := fs.String("config", "config/default.config.yml", "Path to the config file") - fs.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases") + fs.StringVar(&opts.Command, "command", "", "command to run, available: updateAPIKey, applyDbSchema, initBigtableSchema, epoch-export, debug-rewards, debug-blocks, clear-bigtable, index-old-eth1-blocks, update-aggregation-bits, historic-prices-export, index-missing-blocks, export-epoch-missed-slots, migrate-last-attestation-slot-bigtable, export-genesis-validators, update-block-finalization-sequentially, nameValidatorsByRanges, export-stats-totals, export-sync-committee-periods, export-sync-committee-validator-stats, partition-validator-stats, migrate-app-purchases, collect-notifications") fs.Uint64Var(&opts.StartEpoch, "start-epoch", 0, "start epoch") fs.Uint64Var(&opts.EndEpoch, "end-epoch", 0, "end epoch") fs.Uint64Var(&opts.User, "user", 0, "user id") @@ -181,27 +182,27 @@ func 
Run() { defer db.FrontendWriterDB.Close() // clickhouse - db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ - Username: cfg.ClickHouse.WriterDatabase.Username, - Password: cfg.ClickHouse.WriterDatabase.Password, - Name: cfg.ClickHouse.WriterDatabase.Name, - Host: cfg.ClickHouse.WriterDatabase.Host, - Port: cfg.ClickHouse.WriterDatabase.Port, - MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, - SSL: true, - MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, - }, &types.DatabaseConfig{ - Username: cfg.ClickHouse.ReaderDatabase.Username, - Password: cfg.ClickHouse.ReaderDatabase.Password, - Name: cfg.ClickHouse.ReaderDatabase.Name, - Host: cfg.ClickHouse.ReaderDatabase.Host, - Port: cfg.ClickHouse.ReaderDatabase.Port, - MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, - SSL: true, - MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, - }, "clickhouse", "clickhouse") - defer db.ClickHouseReader.Close() - defer db.ClickHouseWriter.Close() + // db.ClickHouseWriter, db.ClickHouseReader = db.MustInitDB(&types.DatabaseConfig{ + // Username: cfg.ClickHouse.WriterDatabase.Username, + // Password: cfg.ClickHouse.WriterDatabase.Password, + // Name: cfg.ClickHouse.WriterDatabase.Name, + // Host: cfg.ClickHouse.WriterDatabase.Host, + // Port: cfg.ClickHouse.WriterDatabase.Port, + // MaxOpenConns: cfg.ClickHouse.WriterDatabase.MaxOpenConns, + // SSL: true, + // MaxIdleConns: cfg.ClickHouse.WriterDatabase.MaxIdleConns, + // }, &types.DatabaseConfig{ + // Username: cfg.ClickHouse.ReaderDatabase.Username, + // Password: cfg.ClickHouse.ReaderDatabase.Password, + // Name: cfg.ClickHouse.ReaderDatabase.Name, + // Host: cfg.ClickHouse.ReaderDatabase.Host, + // Port: cfg.ClickHouse.ReaderDatabase.Port, + // MaxOpenConns: cfg.ClickHouse.ReaderDatabase.MaxOpenConns, + // SSL: true, + // MaxIdleConns: cfg.ClickHouse.ReaderDatabase.MaxIdleConns, + // }, "clickhouse", "clickhouse") + // defer db.ClickHouseReader.Close() + // defer 
db.ClickHouseWriter.Close() // Initialize the persistent redis client rdc := redis.NewClient(&redis.Options{ @@ -456,6 +457,8 @@ func Run() { err = fixEns(erigonClient) case "fix-ens-addresses": err = fixEnsAddresses(erigonClient) + case "collect-notifications": + err = collectNotifications(opts.StartEpoch, opts.EndEpoch) default: log.Fatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -467,6 +470,19 @@ func Run() { } } +func collectNotifications(startEpoch, endEpoch uint64) error { + epoch := startEpoch + + log.Infof("collecting notifications for epoch %v", epoch) + notifications, err := notification.GetNotificationsForEpoch(utils.Config.Notifications.PubkeyCachePath, epoch) + if err != nil { + return err + } + + log.Infof("found %v notifications for epoch %v", len(notifications), epoch) + return nil +} + func fixEns(erigonClient *rpc.ErigonClient) error { log.Infof("command: fix-ens") addrs := []struct { diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index 767aca38b..eb1c04472 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -28,10 +28,10 @@ type NotificationsPerUserId map[UserId]map[EventName]map[EventFilter]Notificatio func (npui NotificationsPerUserId) AddNotification(n Notification) { if n.GetUserId() == 0 { - log.Fatal(fmt.Errorf("Notification user id is 0"), fmt.Sprintf("Notification: %v", n), 0) + log.Fatal(fmt.Errorf("Notification user id is 0"), fmt.Sprintf("Notification: %v", n), 1) } if n.GetEventName() == "" { - log.Fatal(fmt.Errorf("Notification event name is empty"), fmt.Sprintf("Notification: %v", n), 0) + log.Fatal(fmt.Errorf("Notification event name is empty"), fmt.Sprintf("Notification: %v", n), 1) } // next check is disabled as there are events that do not require a filter (rocketpool, network events) // if n.GetEventFilter() == "" { diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 6e19acf3b..1f999a411 
100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -3,6 +3,7 @@ package notification import ( "github.com/doug-martin/goqu/v9" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" @@ -39,7 +40,8 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las goqu.C("last_sent_epoch"), goqu.C("created_epoch"), goqu.C("event_threshold"), - ).Where(goqu.C("event_name").Eq(utils.GetNetwork() + ":" + string(eventName))) + goqu.C("event_name"), + ).Where(goqu.L("(event_name = ? AND user_id <> 0)", utils.GetNetwork()+":"+string(eventName))) if lastSentFilter != "" { if len(lastSentFilterArgs) > 0 { @@ -63,6 +65,8 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las return nil, err } + log.Infof("Found %d subscriptions for event %s", len(subs), eventName) + for _, sub := range subs { if _, ok := subMap[sub.EventFilter]; !ok { subMap[sub.EventFilter] = make([]types.Subscription, 0) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 7b148d0d5..d7cb84e97 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -45,6 +45,14 @@ func InitNotificationSender() { go notificationSender() } +func GetNotificationsForEpoch(pubkeyCachePath string, epoch uint64) (types.NotificationsPerUserId, error) { + err := initPubkeyCache(pubkeyCachePath) + if err != nil { + log.Fatal(err, "error initializing pubkey cache path for notifications", 0) + } + return collectNotifications(epoch) +} + func InitNotificationCollector(pubkeyCachePath string) { err := initPubkeyCache(pubkeyCachePath) if err != nil { @@ -1739,8 +1747,8 @@ func collectValidatorGotSlashedNotifications(notificationsByUserID types.Notific for _, subs := range subscribedUsers { for 
_, sub := range subs { event := pubkeyToSlashingInfoMap[sub.EventFilter] - if event == nil { - log.Error(fmt.Errorf("error retrieving slashing info for public key %s", sub.EventFilter), "", 0) + if event == nil { // pubkey has not been slashed + //log.Error(fmt.Errorf("error retrieving slashing info for public key %s", sub.EventFilter), "", 0) continue } log.Infof("creating %v notification for validator %v in epoch %v", event.Reason, sub.EventFilter, epoch) @@ -2386,7 +2394,7 @@ func collectNetworkNotifications(notificationsByUserID types.NotificationsPerUse dbResult, err := GetSubsForEventFilter( types.NetworkLivenessIncreasedEventName, - "us.last_sent_ts <= NOW() - INTERVAL '1 hour' OR us.last_sent_ts IS NULL", + "(last_sent_ts <= NOW() - INTERVAL '1 hour' OR last_sent_ts IS NULL)", nil, nil, ) @@ -2760,7 +2768,7 @@ func collectSyncCommittee(notificationsByUserID types.NotificationsPerUserId, ep pubKeys = append(pubKeys, val.PubKey) } - dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoon, "us.last_sent_ts <= NOW() - INTERVAL '26 hours' OR us.last_sent_ts IS NULL", nil, pubKeys) + dbResult, err := GetSubsForEventFilter(types.SyncCommitteeSoon, "(last_sent_ts <= NOW() - INTERVAL '26 hours' OR last_sent_ts IS NULL)", nil, pubKeys) // err = db.FrontendWriterDB.Select(&dbResult, ` // SELECT us.id, us.user_id, us.event_filter, ENCODE(us.unsubscribe_hash, 'hex') as unsubscribe_hash // FROM users_subscriptions AS us From d8282b1130aaf0251e5bab103767e2066a3df80a Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:13:44 +0000 Subject: [PATCH 109/187] chore(notifications): please linter --- backend/cmd/misc/main.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/cmd/misc/main.go b/backend/cmd/misc/main.go index 8af9a3499..fd77d8217 100644 --- a/backend/cmd/misc/main.go +++ b/backend/cmd/misc/main.go @@ -458,7 +458,7 @@ func Run() { case "fix-ens-addresses": err = 
fixEnsAddresses(erigonClient) case "collect-notifications": - err = collectNotifications(opts.StartEpoch, opts.EndEpoch) + err = collectNotifications(opts.StartEpoch) default: log.Fatal(nil, fmt.Sprintf("unknown command %s", opts.Command), 0) } @@ -470,7 +470,7 @@ func Run() { } } -func collectNotifications(startEpoch, endEpoch uint64) error { +func collectNotifications(startEpoch uint64) error { epoch := startEpoch log.Infof("collecting notifications for epoch %v", epoch) From 4467399f158602a575230cd39f294448c9beda37 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:31:13 +0000 Subject: [PATCH 110/187] chore(notification): update firebase sdk --- backend/go.mod | 49 +++++----- backend/go.sum | 109 ++++++++++++---------- backend/pkg/notification/firebase.go | 30 +++--- backend/pkg/notification/notifications.go | 2 +- 4 files changed, 104 insertions(+), 86 deletions(-) diff --git a/backend/go.mod b/backend/go.mod index b53006df9..25788bb93 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -6,6 +6,7 @@ require ( cloud.google.com/go/bigtable v1.21.0 cloud.google.com/go/secretmanager v1.11.5 firebase.google.com/go v3.13.0+incompatible + firebase.google.com/go/v4 v4.14.1 github.com/ClickHouse/clickhouse-go/v2 v2.17.1 github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 github.com/alexedwards/scs/redisstore v0.0.0-20240316134038-7e11d57e8885 @@ -29,7 +30,7 @@ require ( github.com/gobitfly/eth.store v0.0.0-20240312111708-b43f13990280 github.com/golang-jwt/jwt v3.2.2+incompatible github.com/golang-jwt/jwt/v4 v4.5.0 - github.com/golang/protobuf v1.5.3 + github.com/golang/protobuf v1.5.4 github.com/gomodule/redigo v1.9.2 github.com/google/uuid v1.6.0 github.com/gorilla/csrf v1.7.2 @@ -69,26 +70,27 @@ require ( github.com/wealdtech/go-eth2-types/v2 v2.8.2 github.com/wealdtech/go-eth2-util v1.8.0 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/crypto v0.19.0 + golang.org/x/crypto v0.21.0 
golang.org/x/exp v0.0.0-20240213143201-ec583247a57a golang.org/x/sync v0.6.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 golang.org/x/tools v0.18.0 - google.golang.org/api v0.164.0 - google.golang.org/protobuf v1.32.0 + google.golang.org/api v0.170.0 + google.golang.org/protobuf v1.33.0 gopkg.in/yaml.v2 v2.4.0 ) require ( - cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go v0.112.1 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/firestore v1.14.0 // indirect - cloud.google.com/go/iam v1.1.5 // indirect - cloud.google.com/go/longrunning v0.5.4 // indirect - cloud.google.com/go/storage v1.36.0 // indirect + cloud.google.com/go/firestore v1.15.0 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + cloud.google.com/go/longrunning v0.5.5 // indirect + cloud.google.com/go/storage v1.40.0 // indirect github.com/ClickHouse/ch-go v0.58.2 // indirect + github.com/MicahParks/keyfunc v1.9.0 // indirect github.com/Microsoft/go-winio v0.6.1 // indirect github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 // indirect github.com/ajg/form v1.5.1 // indirect @@ -149,7 +151,7 @@ require ( github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/gorilla/securecookie v1.1.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/herumi/bls-eth-go-binary v1.31.0 // indirect @@ -241,24 +243,25 @@ require ( github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 // indirect - 
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/mod v0.15.0 // indirect - golang.org/x/net v0.21.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect - golang.org/x/sys v0.17.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect - google.golang.org/grpc v1.62.0 // indirect + google.golang.org/appengine/v2 v2.0.2 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/grpc v1.62.1 // indirect gopkg.in/cenkalti/backoff.v1 v1.1.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect diff --git a/backend/go.sum b/backend/go.sum index 7d41b5693..a15806cb0 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -1,26 
+1,28 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/bigtable v1.21.0 h1:BFN4jhkA9ULYYV2Ug7AeOtetVLnN2jKuIq5TcRc5C38= cloud.google.com/go/bigtable v1.21.0/go.mod h1:V0sYNRtk0dgAKjyRr/MyBpHpSXqh+9P39euf820EZ74= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/firestore v1.14.0 h1:8aLcKnMPoldYU3YHgu4t2exrKhLQkqaXAGqT0ljrFVw= -cloud.google.com/go/firestore v1.14.0/go.mod h1:96MVaHLsEhbvkBEdZgfN+AS/GIkco1LRpH9Xp9YZfzQ= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/longrunning v0.5.4 h1:w8xEcbZodnA2BbW6sVirkkoC+1gP8wS57EUUgGS0GVg= -cloud.google.com/go/longrunning v0.5.4/go.mod h1:zqNVncI0BOP8ST6XQD1+VcvuShMmq7+xFSzOL++V0dI= +cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBpbFF8= +cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod 
h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/longrunning v0.5.5 h1:GOE6pZFdSrTb4KAiKnXsJBtlE6mEyaW44oKyMILWnOg= +cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDnoTk0yawPBB7s= cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= -cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8= -cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= contrib.go.opencensus.io/exporter/jaeger v0.2.1 h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI= contrib.go.opencensus.io/exporter/jaeger v0.2.1/go.mod h1:Y8IsLgdxqh1QxYxPC5IgXVmBaeLUeQFfBeBi9PbeZd0= firebase.google.com/go v3.13.0+incompatible h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +firebase.google.com/go/v4 v4.14.1 h1:4qiUETaFRWoFGE1XP5VbcEdtPX93Qs+8B/7KvP2825g= +firebase.google.com/go/v4 v4.14.1/go.mod h1:fgk2XshgNDEKaioKco+AouiegSI9oTWVqRaBdTTGBoM= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -37,6 +39,8 @@ github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 h1:HcdvlzaQ4CJfH7xbfJZ3ZHN//BTEpId46iKEMuP3wHE= github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21/go.mod h1:7PODFS++oNZ6khojmPBvkrDeFO/hrc3jmvWvQAOXorw= 
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/MicahParks/keyfunc v1.9.0 h1:lhKd5xrFHLNOWrDc4Tyb/Q1AJ4LCzQ48GVJyVIID3+o= +github.com/MicahParks/keyfunc v1.9.0/go.mod h1:IdnCilugA0O/99dW+/MkvlyrsX8+L8+x95xuVNtM5jw= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= @@ -323,6 +327,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= @@ -334,6 +339,7 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= 
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -345,8 +351,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -390,8 +396,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= 
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4= github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -979,18 +985,18 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 h1:UNQQKPfTDe1J81ViolILjTKPr9WetKW6uei2hFgJmFs= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0/go.mod h1:r9vWsPS/3AQItv3OSlEJ/E4mbrhUbbw18meOjArPtKQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 h1:sv9kVfal0MK0wBMCOGr+HeJm9v803BkJxGrk2au7j08= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0/go.mod h1:SK2UL73Zy1quvRPonmOmRDiWk1KBV3LyIeeIxcEApWw= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1032,8 +1038,8 @@ golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 
h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -1070,12 +1076,13 @@ golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1127,13 +1134,13 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1175,28 +1182,30 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 
h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= -google.golang.org/api v0.164.0 h1:of5G3oE2WRMVb2yoWKME4ZP8y8zpUKC6bMhxDr8ifyk= -google.golang.org/api v0.164.0/go.mod h1:2OatzO7ZDQsoS7IFf3rvsE17/TldiU3F/zxFHeqUB5o= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/appengine/v2 v2.0.2 h1:MSqyWy2shDLwG7chbwBJ5uMyw6SNqJzhJHNDwYB0Akk= +google.golang.org/appengine/v2 v2.0.2/go.mod h1:PkgRUWz4o1XOvbqtWTkBtCitEJ5Tp4HoVEdMMYQR/8E= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe h1:USL2DhxfgRchafRvt/wYyyQNzwgL7ZiURcozOE/Pkvo= -google.golang.org/genproto v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe h1:0poefMBYvYbs7g5UkjS6HcxBPaTRAmznle9jnxYoAI8= -google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 h1:FSL3lRCkhaPFxqi0s9o+V4UI2WTzAVOvkgbd4kVV4Wg= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014/go.mod h1:SaPjaZGWb0lPqs6Ittu0spdfrOArqji4ZdeP5IC/9N4= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ 
-1209,8 +1218,8 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/cenkalti/backoff.v1 v1.1.0 h1:Arh75ttbsvlpVA7WtVpH4u9h6Zl46xuptxqLxPiSo4Y= gopkg.in/cenkalti/backoff.v1 v1.1.0/go.mod h1:J6Vskwqd+OMVJl8C33mmtxTBs2gyzfv7UDAkHu8BrjI= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index 91291da54..9e42cc49a 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -2,12 +2,13 @@ package notification import ( "context" + "fmt" "strings" "time" - firebase "firebase.google.com/go" "firebase.google.com/go/messaging" + firebase "firebase.google.com/go/v4" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/utils" "google.golang.org/api/option" @@ -24,10 +25,10 @@ func isRelevantError(response *messaging.SendResponse) bool { return false } -func SendPushBatch(messages []*messaging.Message) error { +func SendPushBatch(messages []*messaging.Message, dryRun bool) error { credentialsPath := utils.Config.Notifications.FirebaseCredentialsPath if credentialsPath == "" { - log.Error(nil, "firebase credentials path not provided, disabling push notifications", 0) + log.Error(fmt.Errorf("firebase 
credentials path not provided, disabling push notifications"), "error initializing SendPushBatch", 0) return nil } @@ -42,29 +43,32 @@ func SendPushBatch(messages []*messaging.Message) error { app, err := firebase.NewApp(context.Background(), nil, opt) if err != nil { - log.Error(nil, "error initializing app", 0) + log.Error(err, "error initializing app", 0) return err } client, err := app.Messaging(ctx) if err != nil { - log.Error(nil, "error initializing messaging", 0) + log.Error(err, "error initializing messaging", 0) return err } - var waitBeforeTryInSeconds = []time.Duration{0 * time.Second, 2 * time.Second, 4 * time.Second, 8 * time.Second, 16 * time.Second} + var waitBeforeTryInSeconds = []time.Duration{0, 2, 4, 8, 16} var resultSuccessCount, resultFailureCount int = 0, 0 var result *messaging.BatchResponse currentMessages := messages tries := 0 for _, s := range waitBeforeTryInSeconds { - time.Sleep(s) + time.Sleep(s * time.Second) tries++ - - result, err = client.SendAll(context.Background(), currentMessages) + if dryRun { + result, err = client.SendEachDryRun(context.Background(), currentMessages) + } else { + result, err = client.SendEach(context.Background(), currentMessages) + } if err != nil { - log.Error(nil, "error sending push notifications", 0) + log.Error(err, "error sending push notifications", 0) return err } @@ -74,7 +78,9 @@ func SendPushBatch(messages []*messaging.Message) error { newMessages := make([]*messaging.Message, 0, result.FailureCount) if result.FailureCount > 0 { for i, response := range result.Responses { + logger.Info(response) if isRelevantError(response) { + logger.Infof("retrying message %d", i) newMessages = append(newMessages, currentMessages[i]) resultFailureCount-- } @@ -90,12 +96,12 @@ func SendPushBatch(messages []*messaging.Message) error { if len(currentMessages) > 0 { for _, response := range result.Responses { if isRelevantError(response) { - log.Error(nil, "firebase error", 0, log.Fields{"MessageID": 
response.MessageID, "response": response.Error}) + logger.WithError(response.Error).WithField("MessageID", response.MessageID).Errorf("firebase error") resultFailureCount++ } } } - log.Infof("sent %d firebase notifications in %d of %d tries. successful: %d | failed: %d", len(messages), tries, len(waitBeforeTryInSeconds), resultSuccessCount, resultFailureCount) + logger.Infof("sent %d firebase notifications in %d of %d tries. successful: %d | failed: %d", len(messages), tries, len(waitBeforeTryInSeconds), resultSuccessCount, resultFailureCount) return nil } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index d7cb84e97..944e2aa99 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -678,7 +678,7 @@ func sendPushNotifications() error { end = len(n.Content.Messages) } - err = SendPushBatch(n.Content.Messages[start:end]) + err = SendPushBatch(n.Content.Messages[start:end], false) if err != nil { metrics.Errors.WithLabelValues("notifications_send_push_batch").Inc() log.Error(err, "error sending firebase batch job", 0) From 13780b3bd18e55c854f469d4bd0f65b906531fec Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:37:15 +0000 Subject: [PATCH 111/187] chore(notifications): fix build errors --- backend/pkg/commons/types/frontend.go | 2 +- backend/pkg/notification/firebase.go | 10 +++++----- backend/pkg/notification/notifications.go | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/backend/pkg/commons/types/frontend.go b/backend/pkg/commons/types/frontend.go index eb1c04472..a7b637d8f 100644 --- a/backend/pkg/commons/types/frontend.go +++ b/backend/pkg/commons/types/frontend.go @@ -10,7 +10,7 @@ import ( "strings" "time" - "firebase.google.com/go/messaging" + "firebase.google.com/go/v4/messaging" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" 
"github.com/gobitfly/beaconchain/pkg/commons/log" diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index 9e42cc49a..acdf303f4 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -7,8 +7,8 @@ import ( "strings" "time" - "firebase.google.com/go/messaging" firebase "firebase.google.com/go/v4" + "firebase.google.com/go/v4/messaging" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/utils" "google.golang.org/api/option" @@ -78,9 +78,9 @@ func SendPushBatch(messages []*messaging.Message, dryRun bool) error { newMessages := make([]*messaging.Message, 0, result.FailureCount) if result.FailureCount > 0 { for i, response := range result.Responses { - logger.Info(response) + log.Info(response) if isRelevantError(response) { - logger.Infof("retrying message %d", i) + log.Infof("retrying message %d", i) newMessages = append(newMessages, currentMessages[i]) resultFailureCount-- } @@ -96,12 +96,12 @@ func SendPushBatch(messages []*messaging.Message, dryRun bool) error { if len(currentMessages) > 0 { for _, response := range result.Responses { if isRelevantError(response) { - logger.WithError(response.Error).WithField("MessageID", response.MessageID).Errorf("firebase error") + log.Error(fmt.Errorf("firebase error, message id: %d, error: %s", response.MessageID, response.Error), "error sending push notifications", 0) resultFailureCount++ } } } - logger.Infof("sent %d firebase notifications in %d of %d tries. successful: %d | failed: %d", len(messages), tries, len(waitBeforeTryInSeconds), resultSuccessCount, resultFailureCount) + log.Infof("sent %d firebase notifications in %d of %d tries. 
successful: %d | failed: %d", len(messages), tries, len(waitBeforeTryInSeconds), resultSuccessCount, resultFailureCount) return nil } diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 944e2aa99..352b71ca9 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -22,7 +22,7 @@ import ( "time" gcp_bigtable "cloud.google.com/go/bigtable" - "firebase.google.com/go/messaging" + "firebase.google.com/go/v4/messaging" "github.com/ethereum/go-ethereum/common" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" From 98f7a6303facfeb26e6cb97f22625f787ba326ba Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:39:46 +0000 Subject: [PATCH 112/187] chore(notifications): fix relevant error messages --- backend/pkg/notification/firebase.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index acdf303f4..cd1788da5 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -18,7 +18,9 @@ func isRelevantError(response *messaging.SendResponse) bool { if !response.Success && response.Error != nil { // Ignore https://stackoverflow.com/questions/58308835/using-firebase-for-notifications-getting-app-instance-has-been-unregistered // Errors since they indicate that the user token is expired - if !strings.Contains(response.Error.Error(), "registration-token-not-registered") { + if !strings.Contains(response.Error.Error(), "registration-token-not-registered") && + !strings.Contains(response.Error.Error(), "Requested entity was not found.") && + !strings.Contains(response.Error.Error(), "Request contains an invalid argument.") { return true } } From 4f4367f9eff5fd24053436a29b38c697198e6f38 Mon Sep 17 00:00:00 2001 From: peter 
<1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:41:30 +0000 Subject: [PATCH 113/187] chore(notifications): disable spammy log output --- backend/pkg/notification/firebase.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index cd1788da5..44551b12c 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -80,7 +80,7 @@ func SendPushBatch(messages []*messaging.Message, dryRun bool) error { newMessages := make([]*messaging.Message, 0, result.FailureCount) if result.FailureCount > 0 { for i, response := range result.Responses { - log.Info(response) + //log.Info(response) if isRelevantError(response) { log.Infof("retrying message %d", i) newMessages = append(newMessages, currentMessages[i]) From d3c3a1a44b61b6a87add9d383fa3ef5a9b8cd668 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:44:11 +0000 Subject: [PATCH 114/187] chore(notifications): please linter --- backend/pkg/notification/firebase.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index 44551b12c..3b5264c38 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -98,7 +98,7 @@ func SendPushBatch(messages []*messaging.Message, dryRun bool) error { if len(currentMessages) > 0 { for _, response := range result.Responses { if isRelevantError(response) { - log.Error(fmt.Errorf("firebase error, message id: %d, error: %s", response.MessageID, response.Error), "error sending push notifications", 0) + log.Error(fmt.Errorf("firebase error, message id: %s, error: %s", response.MessageID, response.Error), "error sending push notifications", 0) resultFailureCount++ } } From 8ed5ca29394cf519809fedec4f1c5d9e9bb7746a Mon Sep 17 00:00:00 2001 From: peter 
<1674920+peterbitfly@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:47:31 +0000 Subject: [PATCH 115/187] chore(notifications): please linter --- backend/pkg/notification/firebase.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/pkg/notification/firebase.go b/backend/pkg/notification/firebase.go index 3b5264c38..b4a75a3d6 100644 --- a/backend/pkg/notification/firebase.go +++ b/backend/pkg/notification/firebase.go @@ -55,14 +55,14 @@ func SendPushBatch(messages []*messaging.Message, dryRun bool) error { return err } - var waitBeforeTryInSeconds = []time.Duration{0, 2, 4, 8, 16} + var waitBeforeTryInSeconds = []int{0, 2, 4, 8, 16} var resultSuccessCount, resultFailureCount int = 0, 0 var result *messaging.BatchResponse currentMessages := messages tries := 0 for _, s := range waitBeforeTryInSeconds { - time.Sleep(s * time.Second) + time.Sleep(time.Duration(s) * time.Second) tries++ if dryRun { result, err = client.SendEachDryRun(context.Background(), currentMessages) From 6bf0a410cac2134716637aabf18a9477d5734641 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Wed, 11 Sep 2024 15:18:34 +0200 Subject: [PATCH 116/187] (BEDS-425) implement endpoints for mobile app bundles (#842) --- backend/cmd/typescript_converter/main.go | 2 +- backend/pkg/api/data_access/app.go | 13 +++++++ backend/pkg/api/data_access/dummy.go | 13 +++++-- backend/pkg/api/handlers/internal.go | 46 ++++++++++++++++++++++++ backend/pkg/api/router.go | 2 ++ backend/pkg/api/types/data_access.go | 11 ++++++ backend/pkg/api/types/mobile.go | 8 +++++ frontend/types/api/mobile.ts | 12 +++++++ 8 files changed, 103 insertions(+), 4 deletions(-) create mode 100644 backend/pkg/api/types/mobile.go create mode 100644 frontend/types/api/mobile.ts diff --git a/backend/cmd/typescript_converter/main.go b/backend/cmd/typescript_converter/main.go index 2a3ce86f5..360e57188 100644 --- a/backend/cmd/typescript_converter/main.go +++ 
b/backend/cmd/typescript_converter/main.go @@ -21,7 +21,7 @@ const ( ) // Files that should not be converted to TypeScript -var ignoredFiles = []string{"data_access", "search_types"} +var ignoredFiles = []string{"data_access", "search_types", "archiver"} var typeMappings = map[string]string{ "decimal.Decimal": "string /* decimal.Decimal */", diff --git a/backend/pkg/api/data_access/app.go b/backend/pkg/api/data_access/app.go index e3c29d621..812549b1a 100644 --- a/backend/pkg/api/data_access/app.go +++ b/backend/pkg/api/data_access/app.go @@ -1,6 +1,7 @@ package dataaccess import ( + "context" "database/sql" "fmt" "time" @@ -19,6 +20,8 @@ type AppRepository interface { AddMobileNotificationToken(userID uint64, deviceID, notifyToken string) error GetAppSubscriptionCount(userID uint64) (uint64, error) AddMobilePurchase(tx *sql.Tx, userID uint64, paymentDetails t.MobileSubscription, verifyResponse *userservice.VerifyResponse, extSubscriptionId string) error + GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) + IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error } // GetUserIdByRefreshToken basically used to confirm the claimed user id with the refresh token. 
Returns the userId if successful @@ -105,3 +108,13 @@ func (d *DataAccessService) AddMobilePurchase(tx *sql.Tx, userID uint64, payment return err } + +func (d *DataAccessService) GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) { + // @TODO data access + return d.dummy.GetLatestBundleForNativeVersion(ctx, nativeVersion) +} + +func (d *DataAccessService) IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error { + // @TODO data access + return d.dummy.IncrementBundleDeliveryCount(ctx, bundleVerison) +} diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 6197a72b2..36247ea76 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -11,7 +11,6 @@ import ( "github.com/go-faker/faker/v4" "github.com/go-faker/faker/v4/pkg/options" "github.com/gobitfly/beaconchain/pkg/api/enums" - "github.com/gobitfly/beaconchain/pkg/api/types" t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/userservice" "github.com/shopspring/decimal" @@ -637,7 +636,15 @@ func (d *DummyService) GetRocketPoolOverview(ctx context.Context) (*t.RocketPool return getDummyStruct[t.RocketPoolData]() } -func (d *DummyService) GetHealthz(ctx context.Context, showAll bool) types.HealthzData { - r, _ := getDummyData[types.HealthzData]() +func (d *DummyService) GetHealthz(ctx context.Context, showAll bool) t.HealthzData { + r, _ := getDummyData[t.HealthzData]() return r } + +func (d *DummyService) GetLatestBundleForNativeVersion(ctx context.Context, nativeVersion uint64) (*t.MobileAppBundleStats, error) { + return getDummyStruct[t.MobileAppBundleStats]() +} + +func (d *DummyService) IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error { + return nil +} diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 1c4157290..d111ab7ca 100644 --- 
a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -461,6 +461,52 @@ func (h *HandlerService) InternalGetValidatorDashboardRocketPoolMinipools(w http h.PublicGetValidatorDashboardRocketPoolMinipools(w, r) } +// -------------------------------------- +// Mobile + +func (h *HandlerService) InternalGetMobileLatestBundle(w http.ResponseWriter, r *http.Request) { + var v validationError + q := r.URL.Query() + force := v.checkBool(q.Get("force"), "force") + bundleVersion := v.checkUint(q.Get("bundle_version"), "bundle_version") + nativeVersion := v.checkUint(q.Get("native_version"), "native_version") + if v.hasErrors() { + handleErr(w, r, v) + return + } + stats, err := h.dai.GetLatestBundleForNativeVersion(r.Context(), nativeVersion) + if err != nil { + handleErr(w, r, err) + return + } + var data types.MobileBundleData + data.HasNativeUpdateAvailable = stats.MaxNativeVersion > nativeVersion + // if given bundle version is smaller than the latest and delivery count is less than target count, return the latest bundle + if force || (bundleVersion < stats.LatestBundleVersion && (stats.TargetCount == 0 || stats.DeliveryCount < stats.TargetCount)) { + data.BundleUrl = stats.BundleUrl + } + response := types.GetMobileLatestBundleResponse{ + Data: data, + } + returnOk(w, r, response) +} + +func (h *HandlerService) InternalPostMobileBundleDeliveries(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + bundleVersion := v.checkUint(vars["bundle_version"], "bundle_version") + if v.hasErrors() { + handleErr(w, r, v) + return + } + err := h.dai.IncrementBundleDeliveryCount(r.Context(), bundleVersion) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) +} + // -------------------------------------- // Notifications diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 5603e4856..0b3d6a819 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -93,6 
+93,8 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro {http.MethodGet, "/mobile/authorize", nil, hs.InternalPostMobileAuthorize}, {http.MethodPost, "/mobile/equivalent-exchange", nil, hs.InternalPostMobileEquivalentExchange}, {http.MethodPost, "/mobile/purchase", nil, hs.InternalHandleMobilePurchase}, + {http.MethodGet, "/mobile/latest-bundle", nil, hs.InternalGetMobileLatestBundle}, + {http.MethodPost, "/mobile/bundles/{bundle_version}/deliveries", nil, hs.InternalPostMobileBundleDeliveries}, {http.MethodPost, "/logout", nil, hs.InternalPostLogout}, diff --git a/backend/pkg/api/types/data_access.go b/backend/pkg/api/types/data_access.go index fb1257f62..49d88b926 100644 --- a/backend/pkg/api/types/data_access.go +++ b/backend/pkg/api/types/data_access.go @@ -227,3 +227,14 @@ type HealthzData struct { DeploymentType string `json:"deployment_type"` Reports map[string][]HealthzResult `json:"status_reports"` } + +// ------------------------- +// Mobile structs + +type MobileAppBundleStats struct { + LatestBundleVersion uint64 + BundleUrl string + TargetCount uint64 // coalesce to 0 if column is null + DeliveryCount uint64 + MaxNativeVersion uint64 // the max native version of the whole table for the given environment +} diff --git a/backend/pkg/api/types/mobile.go b/backend/pkg/api/types/mobile.go new file mode 100644 index 000000000..7f84a999d --- /dev/null +++ b/backend/pkg/api/types/mobile.go @@ -0,0 +1,8 @@ +package types + +type MobileBundleData struct { + BundleUrl string `json:"bundle_url,omitempty"` + HasNativeUpdateAvailable bool `json:"has_native_update_available"` +} + +type GetMobileLatestBundleResponse ApiDataResponse[MobileBundleData] diff --git a/frontend/types/api/mobile.ts b/frontend/types/api/mobile.ts new file mode 100644 index 000000000..d6b234a18 --- /dev/null +++ b/frontend/types/api/mobile.ts @@ -0,0 +1,12 @@ +// Code generated by tygo. DO NOT EDIT. 
+/* eslint-disable */ +import type { ApiDataResponse } from './common' + +////////// +// source: mobile.go + +export interface MobileBundleData { + bundle_url?: string; + has_native_update_available: boolean; +} +export type GetMobileLatestBundleResponse = ApiDataResponse; From e1f86fbf2284f3559e1407addf69bbdb206a2e19 Mon Sep 17 00:00:00 2001 From: MarcelBitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Thu, 12 Sep 2024 08:26:06 +0200 Subject: [PATCH 117/187] fix(DashboardHeader): only show `accounts button` in `development` --- frontend/components/dashboard/DashboardHeader.vue | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/frontend/components/dashboard/DashboardHeader.vue b/frontend/components/dashboard/DashboardHeader.vue index 8db7f0d54..1e1f0b3e1 100644 --- a/frontend/components/dashboard/DashboardHeader.vue +++ b/frontend/components/dashboard/DashboardHeader.vue @@ -90,10 +90,12 @@ const items = computed(() => { const cd = db as CookieDashboard return createMenuBarButton('validator', getDashboardName(cd), `${cd.hash !== undefined ? cd.hash : cd.id}`) })) - addToSortedItems($t('dashboard.header.account'), dashboards.value?.validator_dashboards?.slice(0, 1).map((db) => { - const cd = db as CookieDashboard - return createMenuBarButton('account', getDashboardName(cd), `${cd.hash ?? cd.id}`) - })) + if (showInDevelopment) { + addToSortedItems($t('dashboard.header.account'), dashboards.value?.validator_dashboards?.slice(0, 1).map((db) => { + const cd = db as CookieDashboard + return createMenuBarButton('account', getDashboardName(cd), `${cd.hash ?? cd.id}`) + })) + } const disabledTooltip = !showInDevelopment ? 
$t('common.coming_soon') : undefined const onNotificationsPage = dashboardType.value === 'notifications' addToSortedItems($t('notifications.title'), [ { From 402b034f63371392e3539d41c73f69fc5d2549b0 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:02:36 +0200 Subject: [PATCH 118/187] BEDS-306: monitoring: shorter minimum expiry delta of status reports --- backend/pkg/monitoring/services/base.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/pkg/monitoring/services/base.go b/backend/pkg/monitoring/services/base.go index 549b4d67f..48effa38d 100644 --- a/backend/pkg/monitoring/services/base.go +++ b/backend/pkg/monitoring/services/base.go @@ -65,8 +65,8 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat if timeout != constants.Default { timeouts_at = now.Add(timeout) } - expires_at := timeouts_at.Add(5 * time.Minute) - if check_interval >= 5*time.Minute { + expires_at := timeouts_at.Add(1 * time.Minute) + if check_interval >= 1*time.Minute { expires_at = timeouts_at.Add(check_interval) } log.TraceWithFields(log.Fields{ From b50cbf80f075d5620b643b640da6dfe4155d6da2 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:03:47 +0200 Subject: [PATCH 119/187] BEDS-306: monitoring: close clickhouse native writer if automagically opened no way this will ever back fire right --- backend/pkg/monitoring/monitoring.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/backend/pkg/monitoring/monitoring.go b/backend/pkg/monitoring/monitoring.go index b24c3cb9a..dbf021a77 100644 --- a/backend/pkg/monitoring/monitoring.go +++ b/backend/pkg/monitoring/monitoring.go @@ -1,6 +1,9 @@ package monitoring import ( + "sync" + "sync/atomic" + "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" 
"github.com/gobitfly/beaconchain/pkg/commons/metrics" @@ -11,11 +14,17 @@ import ( ) var monitoredServices []services.Service +var startedClickhouse atomic.Bool +var initMutex = sync.Mutex{} func Init(full bool) { + initMutex.Lock() + defer initMutex.Unlock() metrics.UUID.WithLabelValues(utils.GetUUID()).Set(1) // so we can find out where the uuid is set metrics.DeploymentType.WithLabelValues(utils.Config.DeploymentType).Set(1) if db.ClickHouseNativeWriter == nil { + log.Infof("initializing clickhouse writer") + startedClickhouse.Store(true) db.ClickHouseNativeWriter = db.MustInitClickhouseNative(&types.DatabaseConfig{ Username: utils.Config.ClickHouse.WriterDatabase.Username, Password: utils.Config.ClickHouse.WriterDatabase.Password, @@ -58,4 +67,7 @@ func Stop() { } // this prevents status reports that werent shut down cleanly from triggering alerts services.NewStatusReport(constants.CleanShutdownEvent, constants.Default, constants.Default)(constants.Success, nil) + if startedClickhouse.Load() { + db.ClickHouseNativeWriter.Close() + } } From dd4cd42c2b1bb80523dcbb18f0043ad1749e9133 Mon Sep 17 00:00:00 2001 From: invis-bitfly <162128378+invis-bitfly@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:05:30 +0200 Subject: [PATCH 120/187] BEDS-306: monitoring: make status reports shoot and forget --- backend/pkg/monitoring/services/base.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/monitoring/services/base.go b/backend/pkg/monitoring/services/base.go index 48effa38d..04cbea094 100644 --- a/backend/pkg/monitoring/services/base.go +++ b/backend/pkg/monitoring/services/base.go @@ -83,7 +83,7 @@ func NewStatusReport(id string, timeout time.Duration, check_interval time.Durat err = db.ClickHouseNativeWriter.AsyncInsert( ctx, "INSERT INTO status_reports (emitter, event_id, deployment_type, insert_id, expires_at, timeouts_at, metadata) VALUES (?, ?, ?, ?, ?, ?, ?)", - true, + false, // true means wait for settlement, but we want to shoot 
and forget. false does mean we cant log any errors that occur during settlement utils.GetUUID(), id, utils.Config.DeploymentType, From 688c3997c1ed9f1ad2f7d2d67052ceedfe95b919 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Mon, 16 Sep 2024 08:48:27 +0000 Subject: [PATCH 121/187] fix(notifications): correct wrong sql syntax --- backend/pkg/notification/db.go | 2 ++ backend/pkg/notification/notifications.go | 10 +++++----- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/backend/pkg/notification/db.go b/backend/pkg/notification/db.go index 1f999a411..d065f4e87 100644 --- a/backend/pkg/notification/db.go +++ b/backend/pkg/notification/db.go @@ -59,6 +59,8 @@ func GetSubsForEventFilter(eventName types.EventName, lastSentFilter string, las return nil, err } + log.Info(query) + subMap := make(map[string][]types.Subscription, 0) err = db.FrontendWriterDB.Select(&subs, query, args...) if err != nil { diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 352b71ca9..03b27ef44 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1952,7 +1952,7 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU dbResult, err := GetSubsForEventFilter( types.EthClientUpdateEventName, - "(us.last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) > us.last_sent_ts) OR us.last_sent_ts IS NULL", + "((last_sent_ts <= NOW() - INTERVAL '2 DAY' AND TO_TIMESTAMP(?) > last_sent_ts) OR last_sent_ts IS NULL)", []interface{}{client.Date.Unix()}, []string{strings.ToLower(client.Name)}) if err != nil { @@ -2074,7 +2074,7 @@ func collectMonitoringMachine( dbResult, err := GetSubsForEventFilter( eventName, - "us.created_epoch <= ? AND (us.last_sent_epoch < (? - ?) OR us.last_sent_epoch IS NULL)", + "(created_epoch <= ? AND (last_sent_epoch < (? - ?) 
OR last_sent_epoch IS NULL))", []interface{}{epoch, epoch, epochWaitInBetween}, nil, ) @@ -2325,7 +2325,7 @@ func collectTaxReportNotificationNotifications(notificationsByUserID types.Notif dbResults, err := GetSubsForEventFilter( types.TaxReportEventName, - "us.last_sent_ts < ? OR (us.last_sent_ts IS NULL AND us.created_ts < ?)", + "(last_sent_ts < ? OR (last_sent_ts IS NULL AND created_ts < ?))", []interface{}{firstDayOfMonth, firstDayOfMonth}, nil, ) @@ -2487,7 +2487,7 @@ func collectRocketpoolComissionNotifications(notificationsByUserID types.Notific dbResult, err := GetSubsForEventFilter( types.RocketpoolCommissionThresholdEventName, - "(us.last_sent_ts <= NOW() - INTERVAL '8 hours' OR us.last_sent_ts IS NULL) AND (us.event_threshold <= ? OR (us.event_threshold < 0 AND us.event_threshold * -1 >= ?)", + "(last_sent_ts <= NOW() - INTERVAL '8 hours' OR last_sent_ts IS NULL) AND (event_threshold <= ? OR (event_threshold < 0 AND event_threshold * -1 >= ?))", []interface{}{fee, fee}, nil, ) @@ -2539,7 +2539,7 @@ func collectRocketpoolRewardClaimRoundNotifications(notificationsByUserID types. dbResult, err := GetSubsForEventFilter( types.RocketpoolNewClaimRoundStartedEventName, - "us.last_sent_ts <= NOW() - INTERVAL '5 hours' OR us.last_sent_ts IS NULL", + "(last_sent_ts <= NOW() - INTERVAL '5 hours' OR last_sent_ts IS NULL)", nil, nil, ) From 45380c276f861e602713c3708f1fb441f2b032ca Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Tue, 10 Sep 2024 13:36:26 +0200 Subject: [PATCH 122/187] feat: add NotificationsClientTable component with store and API integration - Created a new NotificationsClientsTable.vue component to display `notifications client` data. - Implemented a new useNotificationsClientsStore for handling the API calls related to `notifications client` data. - Updated `customFetch.ts` to include the new API endpoints and methods for fetching `notifications client` data. 
- Added translations for the `NotificationsClientsTable.vue` component in the en.json file. - Applied code styling updates in `notifications.vue` and added the ClientsTab to display the `NotificationsClientsTable.vue` component. See: BEDS-324 --- .../NotificationsClientsTable.vue | 189 ++++++++++++++++++ frontend/locales/en.json | 11 + frontend/pages/notifications.vue | 8 +- .../notifications/managementDashboard.json | 31 --- .../useNotificationsClientsStore.ts | 69 +++++++ frontend/types/customFetch.ts | 5 + 6 files changed, 281 insertions(+), 32 deletions(-) create mode 100644 frontend/components/notifications/NotificationsClientsTable.vue delete mode 100644 frontend/public/mock/notifications/managementDashboard.json create mode 100644 frontend/stores/notifications/useNotificationsClientsStore.ts diff --git a/frontend/components/notifications/NotificationsClientsTable.vue b/frontend/components/notifications/NotificationsClientsTable.vue new file mode 100644 index 000000000..470396516 --- /dev/null +++ b/frontend/components/notifications/NotificationsClientsTable.vue @@ -0,0 +1,189 @@ + + + + + diff --git a/frontend/locales/en.json b/frontend/locales/en.json index 33f2e6705..302cfa091 100644 --- a/frontend/locales/en.json +++ b/frontend/locales/en.json @@ -561,6 +561,17 @@ "yes": "Yes" }, "notifications": { + "clients": { + "col": { + "client_name": "Client ", + "version": "Version" + }, + "footer":{ + "subscriptions": "Clients ({count} Subscriptions)" + }, + "search_placeholder":"Client", + "title": "Clients" + }, "col": { "dashboard": "Dashboard", "group": "Group", diff --git a/frontend/pages/notifications.vue b/frontend/pages/notifications.vue index 968b4fe73..5d591e793 100644 --- a/frontend/pages/notifications.vue +++ b/frontend/pages/notifications.vue @@ -97,7 +97,8 @@ const openManageNotifications = () => { />
{ @open-dialog="openManageNotifications" /> +
diff --git a/frontend/public/mock/notifications/managementDashboard.json b/frontend/public/mock/notifications/managementDashboard.json deleted file mode 100644 index eb0d14c72..000000000 --- a/frontend/public/mock/notifications/managementDashboard.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "paging": { - "total_count": 999 - }, - "data": [ - { - "group_id": 123, - "dashboard_id": 222, - "dashboard_name": "My test dashboard", - "dashboard_type": "validator", - "subscriptions": ["missed_attestation"], - "webhook": { - "url": "", - "via_discord": false - }, - "networks": [1] - }, - { - "group_id": 123, - "dashboard_id": 222, - "dashboard_name": "My account dashboard", - "dashboard_type": "account", - "subscriptions": ["missed_attestation", "proposed_attestation"], - "webhook": { - "url": "https://discord.com/some-webhook-link", - "via_discord": false - }, - "networks": [1, 10, 42161, 8453] - } - ] -} diff --git a/frontend/stores/notifications/useNotificationsClientsStore.ts b/frontend/stores/notifications/useNotificationsClientsStore.ts new file mode 100644 index 000000000..d5dbe9a9d --- /dev/null +++ b/frontend/stores/notifications/useNotificationsClientsStore.ts @@ -0,0 +1,69 @@ +import { defineStore } from 'pinia' +import type { InternalGetUserNotificationClientsResponse } from '~/types/api/notifications' +import { API_PATH } from '~/types/customFetch' +import type { TableQueryParams } from '~/types/datatable' + +const notificationsClientStore = defineStore('notifications-clients-store', () => { + const data = ref() + return { data } +}) + +export function useNotificationsClientStore() { + const { isLoggedIn } = useUserStore() + + const { fetch } = useCustomFetch() + const { data } = storeToRefs(notificationsClientStore()) + const { + cursor, isStoredQuery, onSort, pageSize, pendingQuery, query, setCursor, setPageSize, setSearch, setStoredQuery, + } = useTableQuery({ + limit: 10, sort: 'timestamp:desc', + }, 10) + const isLoading = ref(false) + + async function 
loadClientsNotifications(q: TableQueryParams) { + isLoading.value = true + setStoredQuery(q) + try { + const result = await fetch( + API_PATH.NOTIFICATIONS_CLIENTS, + undefined, + undefined, + q, + ) + + isLoading.value = false + if (!isStoredQuery(q)) { + return // in case some query params change while loading + } + + data.value = result + } + catch (e) { + data.value = undefined + isLoading.value = false + } + return data.value + } + + const clientsNotifications = computed(() => { + return data.value + }) + + watch(query, (q) => { + if (q) { + isLoggedIn.value && loadClientsNotifications(q) + } + }, { immediate: true }) + + return { + clientsNotifications, + cursor, + isLoading, + onSort, + pageSize, + query: pendingQuery, + setCursor, + setPageSize, + setSearch, + } +} diff --git a/frontend/types/customFetch.ts b/frontend/types/customFetch.ts index dcc3ec474..18b2d3ea1 100644 --- a/frontend/types/customFetch.ts +++ b/frontend/types/customFetch.ts @@ -40,6 +40,7 @@ export enum API_PATH { LATEST_STATE = '/latestState', LOGIN = '/login', LOGOUT = '/logout', + NOTIFICATIONS_CLIENTS = '/notifications/clients', NOTIFICATIONS_DASHBOARDS = '/notifications/dashboards', NOTIFICATIONS_MACHINE = '/notifications/machines', NOTIFICATIONS_MANAGEMENT_GENERAL = '/notifications/managementGeneral', @@ -279,6 +280,10 @@ export const mapping: Record = { mock: false, path: '/logout', }, + [API_PATH.NOTIFICATIONS_CLIENTS]: { + method: 'GET', + path: '/users/me/notifications/clients', + }, [API_PATH.NOTIFICATIONS_DASHBOARDS]: { path: '/users/me/notifications/dashboards', }, From 2c55bd2d88950cdfdd1242e4760ba469e05b1fad Mon Sep 17 00:00:00 2001 From: Patrick Date: Mon, 16 Sep 2024 13:24:06 +0200 Subject: [PATCH 123/187] refactor(eth1indexer): update ens in seperate go-routine (#858) --- backend/cmd/eth1indexer/main.go | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/backend/cmd/eth1indexer/main.go b/backend/cmd/eth1indexer/main.go index 
badcc7745..d6198b22a 100644 --- a/backend/cmd/eth1indexer/main.go +++ b/backend/cmd/eth1indexer/main.go @@ -189,6 +189,10 @@ func Run() { }() } + if *enableEnsUpdater { + go ImportEnsUpdatesLoop(bt, client, *ensBatchSize) + } + if *enableFullBalanceUpdater { ProcessMetadataUpdates(bt, client, balanceUpdaterPrefix, *balanceUpdaterBatchSize, -1) return @@ -375,14 +379,6 @@ func Run() { ProcessMetadataUpdates(bt, client, balanceUpdaterPrefix, *balanceUpdaterBatchSize, 10) } - if *enableEnsUpdater { - err := bt.ImportEnsUpdates(client.GetNativeClient(), *ensBatchSize) - if err != nil { - log.Error(err, "error importing ens updates", 0, nil) - continue - } - } - log.Infof("index run completed") services.ReportStatus("eth1indexer", "Running", nil) } @@ -390,6 +386,19 @@ func Run() { // utils.WaitForCtrlC() } +func ImportEnsUpdatesLoop(bt *db.Bigtable, client *rpc.ErigonClient, batchSize int64) { + time.Sleep(time.Second * 5) + for { + err := bt.ImportEnsUpdates(client.GetNativeClient(), batchSize) + if err != nil { + log.Error(err, "error importing ens updates", 0, nil) + } else { + services.ReportStatus("ensIndexer", "Running", nil) + } + time.Sleep(time.Second * 5) + } +} + func UpdateTokenPrices(bt *db.Bigtable, client *rpc.ErigonClient, tokenListPath string) error { tokenListContent, err := os.ReadFile(tokenListPath) if err != nil { From 83884000d790cb69e44b3231551967e0fa16e515 Mon Sep 17 00:00:00 2001 From: Manuel <5877862+manuelsc@users.noreply.github.com> Date: Mon, 16 Sep 2024 14:39:16 +0200 Subject: [PATCH 124/187] BEDS-239: map apple product ids to internal ids --- .../pkg/userservice/appsubscription_oracle.go | 22 ++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/backend/pkg/userservice/appsubscription_oracle.go b/backend/pkg/userservice/appsubscription_oracle.go index 0b078674e..0f50ed916 100644 --- a/backend/pkg/userservice/appsubscription_oracle.go +++ b/backend/pkg/userservice/appsubscription_oracle.go @@ -233,13 +233,29 @@ 
func rejectReason(valid bool) string { return "expired" } +// first 3 trillion dollar company and you can't reuse ids +func mapAppleProductID(productID string) string { + mappings := map[string]string{ + "orca.yearly.apple": "orca.yearly", + "orca.apple": "orca", + "dolphin.yearly.apple": "dolphin.yearly", + "dolphin.apple": "dolphin", + "guppy.yearly.apple": "guppy.yearly", + "guppy.apple": "guppy", + } + if mapped, ok := mappings[productID]; ok { + return mapped + } + return productID +} + func verifyApple(apple *api.StoreClient, receipt *types.PremiumData) (*VerifyResponse, error) { response := &VerifyResponse{ Valid: false, ExpirationDate: 0, RejectReason: "", - ProductID: receipt.ProductID, // may be changed by this function to be different than receipt.ProductID - Receipt: receipt.Receipt, // may be changed by this function to be different than receipt.Receipt + ProductID: mapAppleProductID(receipt.ProductID), // may be changed by this function to be different than receipt.ProductID + Receipt: receipt.Receipt, // may be changed by this function to be different than receipt.Receipt } if apple == nil { @@ -300,7 +316,7 @@ func verifyApple(apple *api.StoreClient, receipt *types.PremiumData) (*VerifyRes response.RejectReason = "invalid_product_id" return response, nil } - response.ProductID = productId // update response to reflect the resolved product id + response.ProductID = mapAppleProductID(productId) // update response to reflect the resolved product id expiresDateFloat, ok := claims["expiresDate"].(float64) if !ok { From c141b4c79e7ee6ffac857ede972a48be72ef82b6 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 17 Sep 2024 09:43:56 +0000 Subject: [PATCH 125/187] fix(notifications): add last sent field to webhook data retrieval query --- backend/pkg/notification/notifications.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go 
index 03b27ef44..357c81a35 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -837,6 +837,7 @@ func queueWebhookNotifications(notificationsByUserID types.NotificationsPerUserI url, retries, event_names, + last_sent, destination FROM users_webhooks From e9cef55731f48062d232c67d094a8523fa70192e Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 17 Sep 2024 12:10:28 +0200 Subject: [PATCH 126/187] (BEDS-479) fix notification settings dummy generation (#860) --- backend/pkg/api/data_access/dummy.go | 4 ++-- backend/pkg/api/types/notifications.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 36247ea76..97bc9e369 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -43,9 +43,9 @@ func NewDummyService() *DummyService { return &DummyService{} } -// generate random decimal.Decimal, should result in somewhere around 0.001 ETH (+/- a few decimal places) in Wei +// generate random decimal.Decimal, result is between 0.001 and 1000 GWei (returned in Wei) func randomEthDecimal() decimal.Decimal { - decimal, _ := decimal.NewFromString(fmt.Sprintf("%d00000000000", rand.Int64N(10000000))) //nolint:gosec + decimal, _ := decimal.NewFromString(fmt.Sprintf("%d000000", rand.Int64N(1000000)+1)) //nolint:gosec return decimal } diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 323f6ca00..f183988b1 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -141,8 +141,8 @@ type InternalGetUserNotificationNetworksResponse ApiPagingResponse[NotificationN // ------------------------------------------------------------ // Notification Settings type NotificationSettingsNetwork struct { - GasAboveThreshold decimal.Decimal `json:"gas_above_threshold" 
faker:"boundary_start=0, boundary_end=1"` // 0 is disabled - GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled + GasAboveThreshold decimal.Decimal `json:"gas_above_threshold" faker:"eth"` // 0 is disabled + GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"eth"` // 0 is disabled ParticipationRateThreshold float64 `json:"participation_rate_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled } type NotificationNetwork struct { From 54dc9593b1036cdeaf7bc04d1a18a9f40227e4a7 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:12:38 +0000 Subject: [PATCH 127/187] fix(dashboard): return network id in all cases --- backend/pkg/api/api_test.go | 1 + backend/pkg/api/data_access/vdb_management.go | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index c422bf17f..15e4b8a64 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -389,6 +389,7 @@ func TestPublicAndSharedDashboards(t *testing.T) { numValidators := resp.Data.Validators.Exited + resp.Data.Validators.Offline + resp.Data.Validators.Pending + resp.Data.Validators.Online + resp.Data.Validators.Slashed assert.Greater(t, numValidators, uint64(0), "dashboard should contain at least one validator") assert.Greater(t, len(resp.Data.Groups), 0, "dashboard should contain at least one group") + assert.Greater(t, resp.Data.Network, uint64(0), "dashboard should contain a network id greater than 0") }) t.Run(fmt.Sprintf("[%s]: test group summary", dashboardId.id), func(t *testing.T) { diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 9163a51ab..93b3d1f72 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -287,8 +287,10 @@ func (d 
*DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d id = $1` return d.alloyReader.GetContext(ctx, &data.Network, query, dashboardId.Id) }) + } else { // load the chain id from the config in case of public dashboards + data.Network = utils.Config.Chain.ClConfig.DepositChainID + log.Info(utils.Config.Chain.ClConfig.DepositChainID) } - // TODO handle network of validator set dashboards // Groups if dashboardId.Validators == nil && !dashboardId.AggregateGroups { @@ -514,6 +516,8 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d return nil, fmt.Errorf("error retrieving validator dashboard overview data: %v", err) } + log.Info(data.Network) + return &data, nil } From 9bc77b239907e62186519efa99fd57dcad9139a3 Mon Sep 17 00:00:00 2001 From: peter <1674920+peterbitfly@users.noreply.github.com> Date: Tue, 17 Sep 2024 10:20:49 +0000 Subject: [PATCH 128/187] chore(dashboard): remove unnecessary log statements --- backend/pkg/api/data_access/vdb_management.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 93b3d1f72..a96dced54 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -289,7 +289,6 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d }) } else { // load the chain id from the config in case of public dashboards data.Network = utils.Config.Chain.ClConfig.DepositChainID - log.Info(utils.Config.Chain.ClConfig.DepositChainID) } // Groups @@ -516,8 +515,6 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d return nil, fmt.Errorf("error retrieving validator dashboard overview data: %v", err) } - log.Info(data.Network) - return &data, nil } From 3ff4be3781d99f302204338d2727a3235154e73e Mon Sep 17 00:00:00 2001 From: Patrick Pfeiffer Date: Tue, 17 Sep 2024 13:41:50 +0200 Subject: [PATCH 129/187] 
chore(notifications): better check for webhook-ratelimit --- backend/pkg/notification/notifications.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/pkg/notification/notifications.go b/backend/pkg/notification/notifications.go index 357c81a35..89ec6c858 100644 --- a/backend/pkg/notification/notifications.go +++ b/backend/pkg/notification/notifications.go @@ -1164,7 +1164,7 @@ func sendDiscordNotifications() error { resp.Body.Close() } - if strings.Contains(errResp.Body, "You are being rate limited") { + if resp.StatusCode == http.StatusTooManyRequests { log.Warnf("could not push to discord webhook due to rate limit. %v url: %v", errResp.Body, webhook.Url) } else { log.Error(nil, "error pushing discord webhook", 0, map[string]interface{}{"errResp.Body": errResp.Body, "webhook.Url": webhook.Url}) From fa2d7bca5bce31d583d7ea8fbb8b9c2736fdee05 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:42:10 +0200 Subject: [PATCH 130/187] (BEDS-295) implement automated api doc generation (#748) --- .../workflows/backend-integration-test.yml | 4 +- backend/.gitignore | 3 +- backend/Makefile | 2 +- backend/cmd/typescript_converter/main.go | 2 +- backend/go.mod | 6 +++ backend/go.sum | 13 +++++- backend/pkg/api/api_test.go | 43 +++++++++++++++++++ backend/pkg/api/data_access/data_access.go | 12 +++--- backend/pkg/api/data_access/dummy.go | 6 +++ backend/pkg/api/data_access/ratelimit.go | 22 ++++++++++ backend/pkg/api/docs/static.go | 6 +++ backend/pkg/api/handlers/common.go | 3 +- backend/pkg/api/handlers/internal.go | 27 ++++++++++-- backend/pkg/api/handlers/public.go | 43 ++++++++++++++++--- backend/pkg/api/router.go | 5 +++ backend/pkg/api/types/data_access.go | 1 - backend/pkg/api/types/ratelimit.go | 10 +++++ backend/pkg/commons/ratelimit/ratelimit.go | 3 +- backend/pkg/commons/utils/config.go | 1 + frontend/types/api/ratelimit.ts | 14 ++++++ 20 files changed, 200 insertions(+), 26 
deletions(-) create mode 100644 backend/pkg/api/data_access/ratelimit.go create mode 100644 backend/pkg/api/docs/static.go create mode 100644 backend/pkg/api/types/ratelimit.go create mode 100644 frontend/types/api/ratelimit.ts diff --git a/.github/workflows/backend-integration-test.yml b/.github/workflows/backend-integration-test.yml index 48243480e..c95d43c70 100644 --- a/.github/workflows/backend-integration-test.yml +++ b/.github/workflows/backend-integration-test.yml @@ -33,7 +33,9 @@ jobs: cache-dependency-path: 'backend/go.sum' - name: Test with the Go CLI working-directory: backend - run: go test -failfast ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" + run: + go install github.com/swaggo/swag/cmd/swag@latest && swag init --ot json -o ./pkg/api/docs -d ./pkg/api/ -g ./handlers/public.go + go test -failfast ./pkg/api/... -config "${{ secrets.CI_CONFIG_PATH }}" diff --git a/backend/.gitignore b/backend/.gitignore index 7006633d8..b5f10c4da 100644 --- a/backend/.gitignore +++ b/backend/.gitignore @@ -5,4 +5,5 @@ local_deployment/config.yml local_deployment/elconfig.json local_deployment/.env __gitignore -cmd/playground \ No newline at end of file +cmd/playground +pkg/api/docs/swagger.json diff --git a/backend/Makefile b/backend/Makefile index 01c3705ba..dd099490d 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -10,8 +10,8 @@ CGO_CFLAGS_ALLOW="-O -D__BLST_PORTABLE__" all: mkdir -p bin + go install github.com/swaggo/swag/cmd/swag@latest && swag init --ot json -o ./pkg/api/docs -d ./pkg/api/ -g ./handlers/public.go CGO_CFLAGS=${CGO_CFLAGS} CGO_CFLAGS_ALLOW=${CGO_CFLAGS_ALLOW} go build --ldflags=${LDFLAGS} -o ./bin/bc ./cmd/main.go - clean: rm -rf bin diff --git a/backend/cmd/typescript_converter/main.go b/backend/cmd/typescript_converter/main.go index 360e57188..da6fa5be9 100644 --- a/backend/cmd/typescript_converter/main.go +++ b/backend/cmd/typescript_converter/main.go @@ -31,7 +31,7 @@ var typeMappings = map[string]string{ // Expects the 
following flags: // -out: Output folder for the generated TypeScript file -// Standard usage (execute in backend folder): go run cmd/typescript_converter/main.go -out ../frontend/types/api +// Standard usage (execute in backend folder): go run cmd/main.go typescript-converter -out ../frontend/types/api func Run() { var out string diff --git a/backend/go.mod b/backend/go.mod index b53006df9..59fc22b83 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -24,6 +24,7 @@ require ( github.com/fergusstrange/embedded-postgres v1.29.0 github.com/gavv/httpexpect/v2 v2.16.0 github.com/go-faker/faker/v4 v4.3.0 + github.com/go-openapi/spec v0.20.14 github.com/go-redis/redis/v8 v8.11.5 github.com/gobitfly/eth-rewards v0.1.2-0.20230403064929-411ddc40a5f7 github.com/gobitfly/eth.store v0.0.0-20240312111708-b43f13990280 @@ -139,6 +140,9 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-openapi/swag v0.22.9 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/goccy/go-yaml v1.9.5 // indirect @@ -178,6 +182,7 @@ require ( github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/jbenet/goprocess v0.1.4 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.7 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect @@ -264,6 +269,7 @@ require ( lukechampine.com/blake3 v1.2.1 // indirect moul.io/http2curl/v2 v2.3.0 // indirect rsc.io/tmplfunc v0.0.3 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) replace github.com/wealdtech/go-merkletree v1.0.1-0.20190605192610-2bb163c2ea2a => github.com/rocket-pool/go-merkletree 
v1.0.1-0.20220406020931-c262d9b976dd diff --git a/backend/go.sum b/backend/go.sum index 7d41b5693..34c1e36af 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -285,6 +285,14 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= +github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= @@ -562,6 +570,7 @@ github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/josharian/intern v1.0.0 
h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -1284,5 +1293,5 @@ rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= rsc.io/tmplfunc v0.0.3/go.mod h1:AG3sTPzElb1Io3Yg4voV9AGZJuleGAwaVRxL9M49PhA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/backend/pkg/api/api_test.go b/backend/pkg/api/api_test.go index c422bf17f..a38763240 100644 --- a/backend/pkg/api/api_test.go +++ b/backend/pkg/api/api_test.go @@ -9,12 +9,14 @@ import ( "net/http/httptest" "os" "os/exec" + "slices" "sort" "testing" "time" embeddedpostgres "github.com/fergusstrange/embedded-postgres" "github.com/gavv/httpexpect/v2" + "github.com/go-openapi/spec" "github.com/gobitfly/beaconchain/pkg/api" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" api_types "github.com/gobitfly/beaconchain/pkg/api/types" @@ -25,6 +27,7 @@ import ( "github.com/jmoiron/sqlx" "github.com/pressly/goose/v3" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/crypto/bcrypt" ) @@ -111,6 +114,16 @@ func setup() error { return fmt.Errorf("error inserting user 2: %w", err) } + // insert dummy api weight for testing + _, err = tempDb.Exec(` + INSERT INTO api_weights (bucket, endpoint, method, params, weight, valid_from) + VALUES ($1, $2, $3, 
$4, $5, TO_TIMESTAMP($6))`, + "default", "/api/v2/test-ratelimit", "GET", "", 2, time.Now().Unix(), + ) + if err != nil { + return fmt.Errorf("error inserting api weight: %w", err) + } + cfg := &types.Config{} err = utils.ReadConfig(cfg, *configPath) if err != nil { @@ -469,3 +482,33 @@ func TestPublicAndSharedDashboards(t *testing.T) { }) } } + +func TestApiDoc(t *testing.T) { + e := httpexpect.WithConfig(getExpectConfig(t, ts)) + + t.Run("test api doc json", func(t *testing.T) { + resp := spec.Swagger{} + e.GET("/api/v2/docs/swagger.json"). + Expect(). + Status(http.StatusOK).JSON().Decode(&resp) + + assert.Equal(t, "/api/v2", resp.BasePath, "swagger base path should be '/api/v2'") + require.NotNil(t, 0, resp.Paths, "swagger paths should not nil") + assert.NotEqual(t, 0, len(resp.Paths.Paths), "swagger paths should not be empty") + assert.NotEqual(t, 0, len(resp.Definitions), "swagger definitions should not be empty") + assert.NotEqual(t, 0, len(resp.Host), "swagger host should not be empty") + }) + + t.Run("test api ratelimit weights endpoint", func(t *testing.T) { + resp := api_types.InternalGetRatelimitWeightsResponse{} + e.GET("/api/i/ratelimit-weights"). + Expect(). 
+ Status(http.StatusOK).JSON().Decode(&resp) + + assert.GreaterOrEqual(t, len(resp.Data), 1, "ratelimit weights should contain at least one entry") + testEndpointIndex := slices.IndexFunc(resp.Data, func(item api_types.ApiWeightItem) bool { + return item.Endpoint == "/api/v2/test-ratelimit" + }) + assert.GreaterOrEqual(t, testEndpointIndex, 0, "ratelimit weights should contain an entry for /api/v2/test-ratelimit") + }) +} diff --git a/backend/pkg/api/data_access/data_access.go b/backend/pkg/api/data_access/data_access.go index c4b3b8338..3fc31d105 100644 --- a/backend/pkg/api/data_access/data_access.go +++ b/backend/pkg/api/data_access/data_access.go @@ -13,7 +13,6 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/types" - "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/jmoiron/sqlx" "github.com/pkg/errors" ) @@ -29,6 +28,7 @@ type DataAccessor interface { BlockRepository ArchiverRepository ProtocolRepository + RatelimitRepository HealthzRepository StartDataAccessServices() @@ -203,7 +203,7 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { wg.Add(1) go func() { defer wg.Done() - bt, err := db.InitBigtable(utils.Config.Bigtable.Project, utils.Config.Bigtable.Instance, fmt.Sprintf("%d", utils.Config.Chain.ClConfig.DepositChainID), utils.Config.RedisCacheEndpoint) + bt, err := db.InitBigtable(cfg.Bigtable.Project, cfg.Bigtable.Instance, fmt.Sprintf("%d", cfg.Chain.ClConfig.DepositChainID), cfg.RedisCacheEndpoint) if err != nil { log.Fatal(err, "error connecting to bigtable", 0) } @@ -211,11 +211,11 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { }() // Initialize the tiered cache (redis) - if utils.Config.TieredCacheProvider == "redis" || len(utils.Config.RedisCacheEndpoint) != 0 { + if cfg.TieredCacheProvider == "redis" || len(cfg.RedisCacheEndpoint) != 0 { wg.Add(1) go func() { defer wg.Done() - 
cache.MustInitTieredCache(utils.Config.RedisCacheEndpoint) + cache.MustInitTieredCache(cfg.RedisCacheEndpoint) log.Infof("tiered Cache initialized, latest finalized epoch: %v", cache.LatestFinalizedEpoch.Get()) }() } @@ -225,7 +225,7 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { go func() { defer wg.Done() rdc := redis.NewClient(&redis.Options{ - Addr: utils.Config.RedisSessionStoreEndpoint, + Addr: cfg.RedisSessionStoreEndpoint, ReadTimeout: time.Second * 60, }) @@ -237,7 +237,7 @@ func createDataAccessService(cfg *types.Config) *DataAccessService { wg.Wait() - if utils.Config.TieredCacheProvider != "redis" { + if cfg.TieredCacheProvider != "redis" { log.Fatal(fmt.Errorf("no cache provider set, please set TierdCacheProvider (example redis)"), "", 0) } diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 97bc9e369..4590a106b 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -636,6 +636,12 @@ func (d *DummyService) GetRocketPoolOverview(ctx context.Context) (*t.RocketPool return getDummyStruct[t.RocketPoolData]() } +func (d *DummyService) GetApiWeights(ctx context.Context) ([]t.ApiWeightItem, error) { + r := []t.ApiWeightItem{} + err := commonFakeData(&r) + return r, err +} + func (d *DummyService) GetHealthz(ctx context.Context, showAll bool) t.HealthzData { r, _ := getDummyData[t.HealthzData]() return r diff --git a/backend/pkg/api/data_access/ratelimit.go b/backend/pkg/api/data_access/ratelimit.go new file mode 100644 index 000000000..8c17c5d0f --- /dev/null +++ b/backend/pkg/api/data_access/ratelimit.go @@ -0,0 +1,22 @@ +package dataaccess + +import ( + "context" + + "github.com/gobitfly/beaconchain/pkg/api/types" +) + +type RatelimitRepository interface { + GetApiWeights(ctx context.Context) ([]types.ApiWeightItem, error) + // TODO @patrick: move queries from commons/ratelimit/ratelimit.go to here +} + +func (d *DataAccessService) GetApiWeights(ctx 
context.Context) ([]types.ApiWeightItem, error) { + var result []types.ApiWeightItem + err := d.userReader.SelectContext(ctx, &result, ` + SELECT bucket, endpoint, method, weight + FROM api_weights + WHERE valid_from <= NOW() + `) + return result, err +} diff --git a/backend/pkg/api/docs/static.go b/backend/pkg/api/docs/static.go new file mode 100644 index 000000000..93087d82f --- /dev/null +++ b/backend/pkg/api/docs/static.go @@ -0,0 +1,6 @@ +package docs + +import "embed" + +//go:embed * +var Files embed.FS diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index ecc9794db..3a895c481 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -528,8 +528,7 @@ func checkEnum[T enums.EnumFactory[T]](v *validationError, enumString string, na } // checkEnumIsAllowed checks if the given enum is in the list of allowed enums. -// precondition: the enum is the same type as the allowed enums. -func (v *validationError) checkEnumIsAllowed(enum enums.Enum, allowed []enums.Enum, name string) { +func checkEnumIsAllowed[T enums.EnumFactory[T]](v *validationError, enum T, allowed []T, name string) { if enums.IsInvalidEnum(enum) { v.add(name, "parameter is missing or invalid, please check the API documentation") return diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index d111ab7ca..be1d96ecb 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -26,6 +26,21 @@ func (h *HandlerService) InternalGetProductSummary(w http.ResponseWriter, r *htt returnOk(w, r, response) } +// -------------------------------------- +// API Ratelimit Weights + +func (h *HandlerService) InternalGetRatelimitWeights(w http.ResponseWriter, r *http.Request) { + data, err := h.dai.GetApiWeights(r.Context()) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetRatelimitWeightsResponse{ + Data: data, + } + returnOk(w, r, response) 
+} + // -------------------------------------- // Latest State @@ -85,7 +100,7 @@ func (h *HandlerService) InternalPostAdConfigurations(w http.ResponseWriter, r * handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -131,7 +146,7 @@ func (h *HandlerService) InternalGetAdConfigurations(w http.ResponseWriter, r *h handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -161,7 +176,7 @@ func (h *HandlerService) InternalPutAdConfiguration(w http.ResponseWriter, r *ht handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -207,7 +222,7 @@ func (h *HandlerService) InternalDeleteAdConfiguration(w http.ResponseWriter, r handleErr(w, r, err) return } - if user.UserGroup != "ADMIN" { + if user.UserGroup != types.UserGroupAdmin { returnForbidden(w, r, errors.New("user is not an admin")) return } @@ -1311,3 +1326,7 @@ func (h *HandlerService) InternalGetSlotBlobs(w http.ResponseWriter, r *http.Req } returnOk(w, r, response) } + +func (h *HandlerService) ReturnOk(w http.ResponseWriter, r *http.Request) { + returnOk(w, r, nil) +} diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 3e80c2769..a06e798c3 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -17,6 +17,25 @@ import ( // Public handlers may only be authenticated by an API key // Public handlers must never call internal handlers +// @title beaconcha.in API +// @version 2.0 +// @description To authenticate your API request beaconcha.in uses API Keys. 
Set your API Key either by: +// @description - Setting the `Authorization` header in the following format: `Authorization: Bearer `. (recommended) +// @description - Setting the URL query parameter in the following format: `api_key={your_api_key}`.\ +// @description Example: `https://beaconcha.in/api/v2/example?field=value&api_key={your_api_key}` + +// @host beaconcha.in +// @BasePath /api/v2 + +// @securitydefinitions.apikey ApiKeyInHeader +// @in header +// @name Authorization +// @description Use your API key as a Bearer token, e.g. `Bearer ` + +// @securitydefinitions.apikey ApiKeyInQuery +// @in query +// @name api_key + func (h *HandlerService) PublicGetHealthz(w http.ResponseWriter, r *http.Request) { var v validationError showAll := v.checkBool(r.URL.Query().Get("show_all"), "show_all") @@ -112,6 +131,18 @@ func (h *HandlerService) PublicPutAccountDashboardTransactionsSettings(w http.Re returnOk(w, r, nil) } +// PublicPostValidatorDashboards godoc +// +// @Description Create a new validator dashboard. **Note**: New dashboards will automatically have a default group created. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboards +// @Accept json +// @Produce json +// @Param request body handlers.PublicPostValidatorDashboards.request true "`name`: Specify the name of the dashboard.
`network`: Specify the network for the dashboard. Possible options are:
  • `ethereum`
  • `gnosis`
" +// @Success 201 {object} types.ApiDataResponse[types.VDBPostReturnData] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their dashboard limit." +// @Router /validator-dashboards [post] func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r *http.Request) { var v validationError userId, err := GetUserIdByContext(r) @@ -792,8 +823,8 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") + allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + checkEnumIsAllowed(&v, period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -828,8 +859,8 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response groupId := v.checkGroupId(vars["group_id"], forbidEmpty) period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") + allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + checkEnumIsAllowed(&v, period, allowedPeriods, 
"period") if v.hasErrors() { handleErr(w, r, v) return @@ -897,8 +928,8 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res duty := checkEnum[enums.ValidatorDuty](&v, q.Get("duty"), "duty") period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.Enum{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - v.checkEnumIsAllowed(period, allowedPeriods, "period") + allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + checkEnumIsAllowed(&v, period, allowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 0b3d6a819..5338b5ccd 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -5,6 +5,7 @@ import ( "regexp" dataaccess "github.com/gobitfly/beaconchain/pkg/api/data_access" + "github.com/gobitfly/beaconchain/pkg/api/docs" handlers "github.com/gobitfly/beaconchain/pkg/api/handlers" "github.com/gobitfly/beaconchain/pkg/commons/log" "github.com/gobitfly/beaconchain/pkg/commons/metrics" @@ -39,6 +40,8 @@ func NewApiRouter(dataAccessor dataaccess.DataAccessor, cfg *types.Config) *mux. 
addRoutes(handlerService, publicRouter, internalRouter, cfg) + // serve static files + publicRouter.PathPrefix("/docs/").Handler(http.StripPrefix("/api/v2/docs/", http.FileServer(http.FS(docs.Files)))) router.Use(metrics.HttpMiddleware) return router @@ -88,6 +91,8 @@ func addRoutes(hs *handlers.HandlerService, publicRouter, internalRouter *mux.Ro {http.MethodGet, "/healthz", hs.PublicGetHealthz, nil}, {http.MethodGet, "/healthz-loadbalancer", hs.PublicGetHealthzLoadbalancer, nil}, + {http.MethodGet, "/ratelimit-weights", nil, hs.InternalGetRatelimitWeights}, + {http.MethodPost, "/login", nil, hs.InternalPostLogin}, {http.MethodGet, "/mobile/authorize", nil, hs.InternalPostMobileAuthorize}, diff --git a/backend/pkg/api/types/data_access.go b/backend/pkg/api/types/data_access.go index 49d88b926..b24a6b76c 100644 --- a/backend/pkg/api/types/data_access.go +++ b/backend/pkg/api/types/data_access.go @@ -212,7 +212,6 @@ type VDBValidatorSummaryChartRow struct { SyncScheduled float64 `db:"sync_scheduled"` } -// ------------------------- // healthz structs type HealthzResult struct { diff --git a/backend/pkg/api/types/ratelimit.go b/backend/pkg/api/types/ratelimit.go new file mode 100644 index 000000000..6a1155096 --- /dev/null +++ b/backend/pkg/api/types/ratelimit.go @@ -0,0 +1,10 @@ +package types + +type ApiWeightItem struct { + Bucket string `db:"bucket"` + Endpoint string `db:"endpoint"` + Method string `db:"method"` + Weight int `db:"weight"` +} + +type InternalGetRatelimitWeightsResponse ApiDataResponse[[]ApiWeightItem] diff --git a/backend/pkg/commons/ratelimit/ratelimit.go b/backend/pkg/commons/ratelimit/ratelimit.go index 8a79d9a75..a57900bba 100644 --- a/backend/pkg/commons/ratelimit/ratelimit.go +++ b/backend/pkg/commons/ratelimit/ratelimit.go @@ -56,6 +56,7 @@ const ( FallbackRateLimitSecond = 20 // RateLimit per second for when redis is offline FallbackRateLimitBurst = 20 // RateLimit burst for when redis is offline + defaultWeight = 1 // if no weight is set 
for a route, use this one defaultBucket = "default" // if no bucket is set for a route, use this one statsTruncateDuration = time.Hour * 1 // ratelimit-stats are truncated to this duration @@ -951,7 +952,7 @@ func getWeight(r *http.Request) (cost int64, identifier, bucket string) { bucket, bucketOk := buckets[route] weightsMu.RUnlock() if !weightOk { - weight = 1 + weight = defaultWeight } if !bucketOk { bucket = defaultBucket diff --git a/backend/pkg/commons/utils/config.go b/backend/pkg/commons/utils/config.go index 1cc3179eb..ad8529fd0 100644 --- a/backend/pkg/commons/utils/config.go +++ b/backend/pkg/commons/utils/config.go @@ -262,6 +262,7 @@ func ReadConfig(cfg *types.Config, path string) error { "mainCurrency": cfg.Frontend.MainCurrency, }, "did init config") + Config = cfg return nil } diff --git a/frontend/types/api/ratelimit.ts b/frontend/types/api/ratelimit.ts new file mode 100644 index 000000000..aec92ffb8 --- /dev/null +++ b/frontend/types/api/ratelimit.ts @@ -0,0 +1,14 @@ +// Code generated by tygo. DO NOT EDIT. 
+/* eslint-disable */ +import type { ApiDataResponse } from './common' + +////////// +// source: ratelimit.go + +export interface ApiWeightItem { + Bucket: string; + Endpoint: string; + Method: string; + Weight: number /* int */; +} +export type InternalGetRatelimitWeightsResponse = ApiDataResponse; From 9fbcabc81242371e0e02ed107a935f0fc50f90fd Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Tue, 17 Sep 2024 14:21:43 +0200 Subject: [PATCH 131/187] BEDS 322/annotate endpoints (#820) --- backend/pkg/api/data_access/user.go | 7 +- backend/pkg/api/data_access/vdb_management.go | 8 +- backend/pkg/api/handlers/common.go | 2 +- backend/pkg/api/handlers/public.go | 456 +++++++++++++++++- backend/pkg/api/types/common.go | 4 +- backend/pkg/api/types/validator_dashboard.go | 34 +- 6 files changed, 469 insertions(+), 42 deletions(-) diff --git a/backend/pkg/api/data_access/user.go b/backend/pkg/api/data_access/user.go index 598f83995..c7c22d9cf 100644 --- a/backend/pkg/api/data_access/user.go +++ b/backend/pkg/api/data_access/user.go @@ -287,7 +287,10 @@ func (d *DataAccessService) GetUserInfo(ctx context.Context, userId uint64) (*t. 
}{} err = d.userReader.GetContext(ctx, &result, `SELECT email, COALESCE(user_group, '') as user_group FROM users WHERE id = $1`, userId) if err != nil { - return nil, fmt.Errorf("error getting userEmail for user %v: %w", userId, err) + if errors.Is(err, sql.ErrNoRows) { + return nil, fmt.Errorf("%w: user not found", ErrNotFound) + } + return nil, err } userInfo.Email = result.Email userInfo.UserGroup = result.UserGroup @@ -764,7 +767,7 @@ func (d *DataAccessService) GetUserDashboards(ctx context.Context, userId uint64 err := wg.Wait() if err != nil { - return nil, fmt.Errorf("error retrieving user dashboards data: %v", err) + return nil, fmt.Errorf("error retrieving user dashboards data: %w", err) } // Fill the result diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 9163a51ab..b523103c6 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -148,7 +148,7 @@ func (d *DataAccessService) GetValidatorDashboardInfo(ctx context.Context, dashb err := wg.Wait() if err != nil { - return nil, fmt.Errorf("error retrieving user dashboards data: %v", err) + return nil, fmt.Errorf("error retrieving user dashboards data: %w", err) } return result, nil @@ -329,7 +329,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d validators, err := d.getDashboardValidators(ctx, dashboardId, nil) if err != nil { - return fmt.Errorf("error retrieving validators from dashboard id: %v", err) + return fmt.Errorf("error retrieving validators from dashboard id: %w", err) } if dashboardId.Validators != nil || dashboardId.AggregateGroups { @@ -475,7 +475,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d query, args, err := ds.Prepared(true).ToSQL() if err != nil { - return fmt.Errorf("error preparing query: %v", err) + return fmt.Errorf("error preparing query: %w", err) } err = d.clickhouseReader.GetContext(ctx, 
&queryResult, query, args...) @@ -511,7 +511,7 @@ func (d *DataAccessService) GetValidatorDashboardOverview(ctx context.Context, d err = eg.Wait() if err != nil { - return nil, fmt.Errorf("error retrieving validator dashboard overview data: %v", err) + return nil, fmt.Errorf("error retrieving validator dashboard overview data: %w", err) } return &data, nil diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index 3a895c481..ca585724b 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -566,7 +566,7 @@ func checkSort[T enums.EnumFactory[T]](v *validationError, sortString string) *t return nil } if len(sortSplit) == 1 { - sortSplit = append(sortSplit, "") + sortSplit = append(sortSplit, ":asc") } sortCol := checkEnum[T](v, sortSplit[0], "sort") order := v.parseSortOrder(sortSplit[1]) diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index a06e798c3..95df23ea7 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -135,7 +135,7 @@ func (h *HandlerService) PublicPutAccountDashboardTransactionsSettings(w http.Re // // @Description Create a new validator dashboard. **Note**: New dashboards will automatically have a default group created. // @Security ApiKeyInHeader || ApiKeyInQuery -// @Tags Validator Dashboards +// @Tags Validator Dashboard // @Accept json // @Produce json // @Param request body handlers.PublicPostValidatorDashboards.request true "`name`: Specify the name of the dashboard.
`network`: Specify the network for the dashboard. Possible options are:
  • `ethereum`
  • `gnosis`
" @@ -193,6 +193,16 @@ func (h *HandlerService) PublicPostValidatorDashboards(w http.ResponseWriter, r returnCreated(w, r, response) } +// PublicGetValidatorDashboards godoc +// +// @Description Get overview information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse "Bad Request" +// @Router /validator-dashboards/{dashboard_id} [get] func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *http.Request) { var v validationError dashboardIdParam := mux.Vars(r)["dashboard_id"] @@ -244,6 +254,16 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h returnOk(w, r, response) } +// PublicPutValidatorDashboard godoc +// +// @Description Delete a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Success 204 "Dashboard deleted successfully." +// @Failure 400 {object} types.ApiErrorResponse "Bad Request" +// @Router /validator-dashboards/{dashboard_id} [delete] func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -259,6 +279,18 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r returnNoContent(w, r) } +// PublicPutValidatorDashboard godoc +// +// @Description Update the name of a specified validator dashboard. 
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicPutValidatorDashboardName.request true "request" +// @Success 200 {object} types.ApiDataResponse[types.VDBPostReturnData] +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/name [put] func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -286,6 +318,19 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, returnOk(w, r, response) } +// PublicPostValidatorDashboardGroups godoc +// +// @Description Create a new group in a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardGroups.request true "request" +// @Success 201 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their group limit." 
+// @Router /validator-dashboards/{dashboard_id}/groups [post] func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -337,6 +382,19 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite returnCreated(w, r, response) } +// PublicGetValidatorDashboardGroups godoc +// +// @Description Update a groups name in a specified validator dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." +// @Param request body handlers.PublicPutValidatorDashboardGroups.request true "request" +// @Success 200 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id} [put] func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -377,6 +435,18 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter returnOk(w, r, response) } +// PublicDeleteValidatorDashboardGroups godoc +// +// @Description Delete a group in a specified validator dashboard. +// @Tags Validator Dashboard Management +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." +// @Success 204 "Group deleted successfully." 
+// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id} [delete] func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -408,6 +478,19 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit returnNoContent(w, r) } +// PublicGetValidatorDashboardGroups godoc +// +// @Description Add new validators to a specified dashboard or update the group of already-added validators. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardValidators.request true "`group_id`: (optional) Provide a single group id, to which all validators get added to. If omitted, the default group will be used.

To add validators, only one of the following fields can be set:
  • `validators`: Provide a list of validator indices or public keys to add to the dashboard.
  • `deposit_address`: (limited to subscription tiers with 'Bulk adding') Provide a deposit address from which as many validators as possible will be added to the dashboard.
  • `withdrawal_address`: (limited to subscription tiers with 'Bulk adding') Provide a withdrawal address from which as many validators as possible will be added to the dashboard.
  • `graffiti`: (limited to subscription tiers with 'Bulk adding') Provide a graffiti string from which as many validators as possible will be added to the dashboard.
" +// @Success 201 {object} types.ApiDataResponse[[]types.VDBPostValidatorsData] "Returns a list of added validators." +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their validator limit." +// @Router /validator-dashboards/{dashboard_id}/validators [post] func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -531,6 +614,20 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW returnCreated(w, r, response) } +// PublicGetValidatorDashboardValidators godoc +// +// @Description Get a list of groups in a specified validator dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id query string false "The ID of the group." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(index, public_key, balance, status, withdrawal_credentials) +// @Param search query string false "Search for Address, ENS." 
+// @Success 200 {object} types.GetValidatorDashboardValidatorsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups [get] func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -558,19 +655,30 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr returnOk(w, r, response) } +// PublicDeleteValidatorDashboardValidators godoc +// +// @Description Remove validators from a specified dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicDeleteValidatorDashboardValidators.request true "`validators`: Provide an array of validator indices or public keys that should get removed from the dashboard." +// @Success 204 "Validators removed successfully." 
+// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/validators/bulk-deletions [post] func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - var indices []uint64 - var publicKeys []string - req := struct { + type request struct { Validators []intOrString `json:"validators"` - }{} + } + var req request if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return } - indices, publicKeys = v.checkValidators(req.Validators, false) + indices, publicKeys := v.checkValidators(req.Validators, false) if v.hasErrors() { handleErr(w, r, v) return @@ -589,6 +697,19 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons returnNoContent(w, r) } +// PublicPostValidatorDashboardPublicIds godoc +// +// @Description Create a new public ID for a specified dashboard. This can be used as an ID by other users for non-modyfing (i.e. GET) endpoints only. Currently limited to one per dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicPostValidatorDashboardPublicIds.request true "`name`: Provide a public name for the dashboard
`share_settings`:
  • `share_groups`: If set to `false`, accessing the dashboard through the public ID will not reveal any group information.
" +// @Success 201 {object} types.ApiDataResponse[types.VDBPublicId] +// @Failure 400 {object} types.ApiErrorResponse +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their public ID limit." +// @Router /validator-dashboards/{dashboard_id}/public-ids [post] func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) @@ -623,13 +744,26 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr handleErr(w, r, err) return } - response := types.ApiResponse{ - Data: data, + response := types.ApiDataResponse[types.VDBPublicId]{ + Data: *data, } returnCreated(w, r, response) } +// PublicPutValidatorDashboardPublicId godoc +// +// @Description Update a specified public ID for a specified dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param public_id path string true "The ID of the public ID." +// @Param request body handlers.PublicPutValidatorDashboardPublicId.request true "`name`: Provide a public name for the dashboard
`share_settings`:
  • `share_groups`: If set to `false`, accessing the dashboard through the public ID will not reveal any group information.
" +// @Success 200 {object} types.ApiDataResponse[types.VDBPublicId] +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/public-ids/{public_id} [put] func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -666,13 +800,24 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit handleErr(w, r, err) return } - response := types.ApiResponse{ - Data: data, + response := types.ApiDataResponse[types.VDBPublicId]{ + Data: *data, } returnOk(w, r, response) } +// PublicDeleteValidatorDashboardPublicId godoc +// +// @Description Delete a specified public ID for a specified dashboard. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param public_id path string true "The ID of the public ID." +// @Success 204 "Public ID deleted successfully." +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/public-ids/{public_id} [delete] func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -701,12 +846,26 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW returnNoContent(w, r) } +// PublicPutValidatorDashboardArchiving godoc +// +// @Description Archive or unarchive a specified validator dashboard. Archived dashboards cannot be accessed by other endpoints. Archiving happens automatically if the number of dashboards, validators, or groups exceeds the limit allowed by your subscription plan. For example, this might occur if you downgrade your subscription to a lower tier. 
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Validator Dashboard Management +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "request" +// @Success 200 {object} types.ApiDataResponse[types.VDBPostArchivingReturnData] +// @Failure 400 {object} types.ApiErrorResponse +// @Conflict 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." +// @Router /validator-dashboards/{dashboard_id}/archiving [put] func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) - req := struct { + type request struct { IsArchived bool `json:"is_archived"` - }{} + } + var req request if err := v.checkBody(&req, r); err != nil { handleErr(w, r, err) return @@ -784,6 +943,16 @@ func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWri returnOk(w, r, response) } +// PublicGetValidatorDashboardSlotViz godoc +// +// @Description Get slot viz information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_ids query string false "Provide a comma separated list of group IDs to filter the results by. If omitted, all groups will be included." 
+// @Success 200 {object} types.GetValidatorDashboardSlotVizResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/slot-viz [get] func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -809,6 +978,23 @@ func (h *HandlerService) PublicGetValidatorDashboardSlotViz(w http.ResponseWrite returnOk(w, r, response) } +var summaryAllowedPeriods = []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} + +// PublicGetValidatorDashboardSummary godoc +// +// @Description Get summary information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(group_id, validators, efficiency, attestations, proposals, reward) +// @Param search query string false "Search for Index, Public Key, Group." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardSummaryResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/summary [get] func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -823,8 +1009,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite period := checkEnum[enums.TimePeriod](&v, q.Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - checkEnumIsAllowed(&v, period, allowedPeriods, "period") + checkEnumIsAllowed(&v, period, summaryAllowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -842,6 +1027,18 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite returnOk(w, r, response) } +// PublicGetValidatorDashboardGroupSummary godoc +// +// @Description Get summary information for a specified group in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." +// @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardGroupSummaryResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id}/summary [get] func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -859,8 +1056,7 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response groupId := v.checkGroupId(vars["group_id"], forbidEmpty) period := checkEnum[enums.TimePeriod](&v, r.URL.Query().Get("period"), "period") // allowed periods are: all_time, last_30d, last_7d, last_24h, last_1h - allowedPeriods := []enums.TimePeriod{enums.TimePeriods.AllTime, enums.TimePeriods.Last30d, enums.TimePeriods.Last7d, enums.TimePeriods.Last24h, enums.TimePeriods.Last1h} - checkEnumIsAllowed(&v, period, allowedPeriods, "period") + checkEnumIsAllowed(&v, period, summaryAllowedPeriods, "period") if v.hasErrors() { handleErr(w, r, v) return @@ -877,6 +1073,20 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupSummary(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardSummaryChart godoc +// +// @Description Get summary chart data for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_ids query string false "Provide a comma separated list of group IDs to filter the results by." +// @Param efficiency_type query string false "Efficiency type to get data for." Enums(all, attestation, sync, proposal) +// @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) +// @Param after_ts query string false "Return data after this timestamp." +// @Param before_ts query string false "Return data before this timestamp." 
+// @Success 200 {object} types.GetValidatorDashboardSummaryChartResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/summary-chart [get] func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.ResponseWriter, r *http.Request) { var v validationError ctx := r.Context() @@ -916,6 +1126,18 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardSummaryValidators godoc +// +// @Description Get summary information for validators in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id query string false "The ID of the group." +// @Param duty query string false "Validator duty to get data for." Enums(none, sync, slashed, proposal) Default(none) +// @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) +// @Success 200 {object} types.GetValidatorDashboardSummaryValidatorsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/summary/validators [get] func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -966,6 +1188,20 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryValidators(w http.Res returnOk(w, r, response) } +// PublicGetValidatorDashboardRewards godoc +// +// @Description Get rewards information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. 
Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(epoch) +// @Param search query string false "Search for Epoch, Index, Public Key, Group." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardRewardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rewards [get] func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -994,6 +1230,18 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite returnOk(w, r, response) } +// PublicGetValidatorDashboardGroupRewards godoc +// +// @Description Get rewards information for a specified group in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." +// @Param epoch path string true "The epoch to get data for." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardGroupRewardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id}/rewards/{epoch} [get] func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1022,6 +1270,16 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupRewards(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardRewardsChart godoc +// +// @Description Get rewards chart data for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardRewardsChartResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rewards-chart [get] func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1048,6 +1306,22 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardDuties godoc +// +// @Description Get duties information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param epoch path string true "The epoch to get data for." +// @Param group_id query string false "The ID of the group." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." 
+// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(validator, reward) +// @Param search query string false "Search for Index, Public Key." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardDutiesResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/duties/{epoch} [get] func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1079,6 +1353,20 @@ func (h *HandlerService) PublicGetValidatorDashboardDuties(w http.ResponseWriter returnOk(w, r, response) } +// PublicGetValidatorDashboardBlocks godoc +// +// @Description Get blocks information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(proposer, slot, block, status, reward) +// @Param search query string false "Search for Index, Public Key, Group." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardBlocksResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/blocks [get] func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1107,6 +1395,19 @@ func (h *HandlerService) PublicGetValidatorDashboardBlocks(w http.ResponseWriter returnOk(w, r, response) } +// PublicGetValidatorDashboardHeatmap godoc +// +// @Description Get heatmap information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) +// @Param after_ts query string false "Return data after this timestamp." +// @Param before_ts query string false "Return data before this timestamp." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardHeatmapResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/heatmap [get] func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1143,6 +1444,19 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite returnOk(w, r, response) } +// PublicGetValidatorDashboardGroupHeatmap godoc +// +// @Description Get heatmap information for a specified group in a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." 
+// @Param group_id path string true "The ID of the group." +// @Param timestamp path string true "The timestamp to get data for." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) +// @Success 200 {object} types.GetValidatorDashboardGroupHeatmapResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/groups/{group_id}/heatmap/{timestamp} [get] func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1180,6 +1494,17 @@ func (h *HandlerService) PublicGetValidatorDashboardGroupHeatmap(w http.Response returnOk(w, r, response) } +// PublicGetValidatorDashboardExecutionLayerDeposits godoc +// +// @Description Get execution layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." 
+// @Success 200 {object} types.GetValidatorDashboardExecutionLayerDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/execution-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1205,6 +1530,17 @@ func (h *HandlerService) PublicGetValidatorDashboardExecutionLayerDeposits(w htt returnOk(w, r, response) } +// PublicGetValidatorDashboardConsensusLayerDeposits godoc +// +// @Description Get consensus layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." 
+// @Success 200 {object} types.GetValidatorDashboardConsensusLayerDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/consensus-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { var v validationError dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1231,6 +1567,15 @@ func (h *HandlerService) PublicGetValidatorDashboardConsensusLayerDeposits(w htt returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalConsensusLayerDeposits godoc +// +// @Description Get total consensus layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Success 200 {object} types.GetValidatorDashboardTotalConsensusDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-consensus-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits(w http.ResponseWriter, r *http.Request) { var err error dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1250,6 +1595,15 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalConsensusLayerDeposits( returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalExecutionLayerDeposits godoc +// +// @Description Get total execution layer deposits information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." 
+// @Success 200 {object} types.GetValidatorDashboardTotalExecutionDepositsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-execution-layer-deposits [get] func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits(w http.ResponseWriter, r *http.Request) { var err error dashboardId, err := h.handleDashboardId(r.Context(), mux.Vars(r)["dashboard_id"]) @@ -1269,6 +1623,20 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalExecutionLayerDeposits( returnOk(w, r, response) } +// PublicGetValidatorDashboardWithdrawals godoc +// +// @Description Get withdrawals information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(epoch, slot, index, recipient, amount) +// @Param search query string false "Search for Index, Public Key, Address." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." 
+// @Success 200 {object} types.GetValidatorDashboardWithdrawalsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/withdrawals [get] func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1297,6 +1665,16 @@ func (h *HandlerService) PublicGetValidatorDashboardWithdrawals(w http.ResponseW returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalWithdrawals godoc +// +// @Description Get total withdrawals information for a specified dashboard +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." +// @Success 200 {object} types.GetValidatorDashboardTotalWithdrawalsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-withdrawals [get] func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1324,6 +1702,19 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalWithdrawals(w http.Resp returnOk(w, r, response) } +// PublicGetValidatorDashboardRocketPool godoc +// +// @Description Get an aggregated list of the Rocket Pool nodes details associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." 
+// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(node, minipools, collateral, rpl, effective_rpl, rpl_apr, smoothing_pool) +// @Param search query string false "Search for Node address." +// @Success 200 {object} types.GetValidatorDashboardRocketPoolResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rocket-pool [get] func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1351,6 +1742,15 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPool(w http.ResponseWr returnOk(w, r, response) } +// PublicGetValidatorDashboardTotalRocketPool godoc +// +// @Description Get a summary of all Rocket Pool nodes details associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Success 200 {object} types.GetValidatorDashboardTotalRocketPoolResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/total-rocket-pool [get] func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.ResponseWriter, r *http.Request) { var v validationError q := r.URL.Query() @@ -1376,6 +1776,16 @@ func (h *HandlerService) PublicGetValidatorDashboardTotalRocketPool(w http.Respo returnOk(w, r, response) } +// PublicGetValidatorDashboardNodeRocketPool godoc +// +// @Description Get details for a specific Rocket Pool node associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param node_address path string true "The address of the node." 
+// @Success 200 {object} types.GetValidatorDashboardNodeRocketPoolResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rocket-pool/{node_address} [get] func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) @@ -1402,6 +1812,20 @@ func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.Respon returnOk(w, r, response) } +// PublicGetValidatorDashboardRocketPoolMinipools godoc +// +// @Description Get minipools information for a specified Rocket Pool node associated with a specified dashboard. +// @Tags Validator Dashboard +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param node_address path string true "The address of the node." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order. Possible values are TODO." +// @Param search query string false "Search for Index, Node." 
+// @Success 200 {object} types.GetValidatorDashboardRocketPoolMinipoolsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /validator-dashboards/{dashboard_id}/rocket-pool/{node_address}/minipools [get] func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.ResponseWriter, r *http.Request) { var v validationError vars := mux.Vars(r) diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index e658afa80..2c7253623 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -40,8 +40,8 @@ type Address struct { type LuckItem struct { Percent float64 `json:"percent"` - Expected time.Time `json:"expected"` - Average time.Duration `json:"average"` + Expected time.Time `json:"expected" swaggertype:"string" format:"date-time"` + Average time.Duration `json:"average" swaggertype:"primitive,integer"` } type Luck struct { diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go index 39d5e5d0d..380d035d4 100644 --- a/backend/pkg/api/types/validator_dashboard.go +++ b/backend/pkg/api/types/validator_dashboard.go @@ -27,7 +27,7 @@ type VDBOverviewBalances struct { } type VDBOverviewData struct { - Name string `json:"name,omitempty"` + Name string `json:"name,omitempty" extensions:"x-order=1"` Network uint64 `json:"network"` Groups []VDBOverviewGroup `json:"groups"` Validators VDBOverviewValidators `json:"validators"` @@ -60,7 +60,7 @@ type VDBSummaryValidators struct { } type VDBSummaryTableRow struct { - GroupId int64 `json:"group_id"` + GroupId int64 `json:"group_id" extensions:"x-order=1"` Status VDBSummaryStatus `json:"status"` Validators VDBSummaryValidators `json:"validators"` Efficiency float64 `json:"efficiency"` @@ -116,7 +116,7 @@ type GetValidatorDashboardSummaryChartResponse ApiDataResponse[ChartData[int, fl // ------------------------------------------------------------ // Summary Validators type VDBSummaryValidator struct { - 
Index uint64 `json:"index"` + Index uint64 `json:"index" extensions:"x-order=1"` DutyObjects []uint64 `json:"duty_objects,omitempty"` } type VDBSummaryValidatorsData struct { @@ -169,7 +169,7 @@ type GetValidatorDashboardRewardsChartResponse ApiDataResponse[ChartData[int, de // Duties Modal type VDBEpochDutiesTableRow struct { - Validator uint64 `json:"validator"` + Validator uint64 `json:"validator" extensions:"x-order=1"` Duties ValidatorHistoryDuties `json:"duties"` } type GetValidatorDashboardDutiesResponse ApiPagingResponse[VDBEpochDutiesTableRow] @@ -177,12 +177,12 @@ type GetValidatorDashboardDutiesResponse ApiPagingResponse[VDBEpochDutiesTableRo // ------------------------------------------------------------ // Blocks Tab type VDBBlocksTableRow struct { - Proposer uint64 `json:"proposer"` - GroupId uint64 `json:"group_id"` - Epoch uint64 `json:"epoch"` - Slot uint64 `json:"slot"` + Proposer uint64 `json:"proposer" extensions:"x-order=1"` + GroupId uint64 `json:"group_id" extensions:"x-order=2"` + Epoch uint64 `json:"epoch" extensions:"x-order=3"` + Slot uint64 `json:"slot" extensions:"x-order=4"` + Block *uint64 `json:"block,omitempty" extensions:"x-order=5"` Status string `json:"status" tstype:"'success' | 'missed' | 'orphaned' | 'scheduled'" faker:"oneof: success, missed, orphaned, scheduled"` - Block *uint64 `json:"block,omitempty"` RewardRecipient *Address `json:"reward_recipient,omitempty"` Reward *ClElValue[decimal.Decimal] `json:"reward,omitempty"` Graffiti *string `json:"graffiti,omitempty"` @@ -198,22 +198,22 @@ type VDBHeatmapEvents struct { Sync bool `json:"sync"` } type VDBHeatmapCell struct { - X int64 `json:"x"` // Timestamp - Y uint64 `json:"y"` // Group ID + X int64 `json:"x" extensions:"x-order=1"` // Timestamp + Y uint64 `json:"y" extensions:"x-order=2"` // Group ID - Value float64 `json:"value"` // Attestaton Rewards + Value float64 `json:"value" extensions:"x-order=3"` // Attestaton Rewards Events *VDBHeatmapEvents 
`json:"events,omitempty"` } type VDBHeatmap struct { - Timestamps []int64 `json:"timestamps"` // X-Axis Categories (unix timestamp) - GroupIds []uint64 `json:"group_ids"` // Y-Axis Categories - Data []VDBHeatmapCell `json:"data"` + Timestamps []int64 `json:"timestamps" extensions:"x-order=1"` // X-Axis Categories (unix timestamp) + GroupIds []uint64 `json:"group_ids" extensions:"x-order=2"` // Y-Axis Categories + Data []VDBHeatmapCell `json:"data" extensions:"x-order=3"` Aggregation string `json:"aggregation" tstype:"'epoch' | 'hourly' | 'daily' | 'weekly'" faker:"oneof: epoch, hourly, daily, weekly"` } type GetValidatorDashboardHeatmapResponse ApiDataResponse[VDBHeatmap] type VDBHeatmapTooltipData struct { - Timestamp int64 `json:"timestamp"` + Timestamp int64 `json:"timestamp" extensions:"x-order=1"` Proposers StatusCount `json:"proposers"` Syncs uint64 `json:"syncs"` @@ -290,7 +290,7 @@ type GetValidatorDashboardTotalWithdrawalsResponse ApiDataResponse[VDBTotalWithd // ------------------------------------------------------------ // Rocket Pool Tab type VDBRocketPoolTableRow struct { - Node Address `json:"node"` + Node Address `json:"node" extensions:"x-order=1"` Staked struct { Eth decimal.Decimal `json:"eth"` Rpl decimal.Decimal `json:"rpl"` From 9d1196c558e4fc2ba69ff28500aadda108be1d6f Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Wed, 18 Sep 2024 09:29:09 +0200 Subject: [PATCH 132/187] (BEDS-464) annotate notification endpoint doc (#864) --- backend/pkg/api/data_access/dummy.go | 4 +- backend/pkg/api/data_access/notifications.go | 12 +- .../api/enums/validator_dashboard_enums.go | 2 +- backend/pkg/api/handlers/internal.go | 419 +---------- backend/pkg/api/handlers/public.go | 701 +++++++++++++++++- backend/pkg/api/router.go | 39 +- 6 files changed, 743 insertions(+), 434 deletions(-) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 4590a106b..1dc80125d 100644 --- 
a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -452,11 +452,11 @@ func (d *DummyService) GetDashboardNotifications(ctx context.Context, userId uin return getDummyWithPaging[t.NotificationDashboardsTableRow]() } -func (d *DummyService) GetValidatorDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) { +func (d *DummyService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64) (*t.NotificationValidatorDashboardDetail, error) { return getDummyStruct[t.NotificationValidatorDashboardDetail]() } -func (d *DummyService) GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationAccountDashboardDetail, error) { +func (d *DummyService) GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64) (*t.NotificationAccountDashboardDetail, error) { return getDummyStruct[t.NotificationAccountDashboardDetail]() } diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 4859b770c..94530d441 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -12,8 +12,8 @@ type NotificationsRepository interface { GetDashboardNotifications(ctx context.Context, userId uint64, chainId uint64, cursor string, colSort t.Sort[enums.NotificationDashboardsColumn], search string, limit uint64) ([]t.NotificationDashboardsTableRow, *t.Paging, error) // depending on how notifications are implemented, we may need to use something other than `notificationId` for identifying the notification - GetValidatorDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) - GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) 
(*t.NotificationAccountDashboardDetail, error) + GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64) (*t.NotificationValidatorDashboardDetail, error) + GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64) (*t.NotificationAccountDashboardDetail, error) GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) GetClientNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationClientsColumn], search string, limit uint64) ([]t.NotificationClientsTableRow, *t.Paging, error) @@ -37,12 +37,12 @@ func (d *DataAccessService) GetDashboardNotifications(ctx context.Context, userI return d.dummy.GetDashboardNotifications(ctx, userId, chainId, cursor, colSort, search, limit) } -func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationValidatorDashboardDetail, error) { - return d.dummy.GetValidatorDashboardNotificationDetails(ctx, notificationId) +func (d *DataAccessService) GetValidatorDashboardNotificationDetails(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, epoch uint64) (*t.NotificationValidatorDashboardDetail, error) { + return d.dummy.GetValidatorDashboardNotificationDetails(ctx, dashboardId, groupId, epoch) } -func (d *DataAccessService) GetAccountDashboardNotificationDetails(ctx context.Context, notificationId string) (*t.NotificationAccountDashboardDetail, error) { - return d.dummy.GetAccountDashboardNotificationDetails(ctx, notificationId) +func (d *DataAccessService) GetAccountDashboardNotificationDetails(ctx context.Context, dashboardId uint64, groupId uint64, epoch uint64) (*t.NotificationAccountDashboardDetail, error) { + return 
d.dummy.GetAccountDashboardNotificationDetails(ctx, dashboardId, groupId, epoch) } func (d *DataAccessService) GetMachineNotifications(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationMachinesColumn], search string, limit uint64) ([]t.NotificationMachinesTableRow, *t.Paging, error) { diff --git a/backend/pkg/api/enums/validator_dashboard_enums.go b/backend/pkg/api/enums/validator_dashboard_enums.go index 928d5742c..fcbfec71b 100644 --- a/backend/pkg/api/enums/validator_dashboard_enums.go +++ b/backend/pkg/api/enums/validator_dashboard_enums.go @@ -437,7 +437,7 @@ func (c VDBRocketPoolMinipoolsColumn) Int() int { func (VDBRocketPoolMinipoolsColumn) NewFromString(s string) VDBRocketPoolMinipoolsColumn { switch s { - case "group": + case "group_id": return VDBRocketPoolMinipoolsGroup default: return VDBRocketPoolMinipoolsColumn(-1) diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index be1d96ecb..0a0f5c006 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -2,7 +2,6 @@ package handlers import ( "errors" - "math" "net/http" "github.com/gobitfly/beaconchain/pkg/api/enums" @@ -526,459 +525,79 @@ func (h *HandlerService) InternalPostMobileBundleDeliveries(w http.ResponseWrite // Notifications func (h *HandlerService) InternalGetUserNotifications(w http.ResponseWriter, r *http.Request) { - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetNotificationOverview(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotifications(w, r) } func (h *HandlerService) InternalGetUserNotificationDashboards(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } 
- q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort")) - chainId := v.checkNetworkParameter(q.Get("network")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetDashboardNotifications(r.Context(), userId, chainId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationDashboardsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationDashboards(w, r) } func (h *HandlerService) InternalGetUserNotificationsValidatorDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - notificationId := v.checkRegex(reNonEmpty, mux.Vars(r)["notification_id"], "notification_id") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, err := h.dai.GetValidatorDashboardNotificationDetails(r.Context(), notificationId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationsValidatorDashboardResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotificationsValidatorDashboard(w, r) } func (h *HandlerService) InternalGetUserNotificationsAccountDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - notificationId := v.checkRegex(reNonEmpty, mux.Vars(r)["notification_id"], "notification_id") - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, err := h.dai.GetAccountDashboardNotificationDetails(r.Context(), notificationId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationsAccountDashboardResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotificationsAccountDashboard(w, r) } func (h *HandlerService) InternalGetUserNotificationMachines(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := 
GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationMachinesColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationMachinesResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationMachines(w, r) } func (h *HandlerService) InternalGetUserNotificationClients(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationClientsColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationClientsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationClients(w, r) } func (h *HandlerService) InternalGetUserNotificationRocketPool(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationRocketPoolColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - 
handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationRocketPoolResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationRocketPool(w, r) } func (h *HandlerService) InternalGetUserNotificationNetworks(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationNetworksColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationNetworksResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationNetworks(w, r) } func (h *HandlerService) InternalGetUserNotificationSettings(w http.ResponseWriter, r *http.Request) { - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - data, err := h.dai.GetNotificationSettings(r.Context(), userId) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalGetUserNotificationSettingsResponse{ - Data: *data, - } - returnOk(w, r, response) + h.PublicGetUserNotificationSettings(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsGeneral(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - var req types.NotificationSettingsGeneral - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - checkMinMax(&v, req.MachineStorageUsageThreshold, 0, 1, "machine_storage_usage_threshold") - checkMinMax(&v, req.MachineCpuUsageThreshold, 0, 1, "machine_cpu_usage_threshold") 
- checkMinMax(&v, req.MachineMemoryUsageThreshold, 0, 1, "machine_memory_usage_threshold") - checkMinMax(&v, req.RocketPoolMaxCollateralThreshold, 0, 1, "rocket_pool_max_collateral_threshold") - checkMinMax(&v, req.RocketPoolMinCollateralThreshold, 0, 1, "rocket_pool_min_collateral_threshold") - // TODO: check validity of clients - if v.hasErrors() { - handleErr(w, r, v) - return - } - err = h.dai.UpdateNotificationSettingsGeneral(r.Context(), userId, req) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsGeneralResponse{ - Data: req, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsGeneral(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsNetworks(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - var req types.NotificationSettingsNetwork - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - checkMinMax(&v, req.ParticipationRateThreshold, 0, 1, "participation_rate_threshold") - - chainId := v.checkNetworkParameter(mux.Vars(r)["network"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, req) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsNetworksResponse{ - Data: types.NotificationNetwork{ - ChainId: chainId, - Settings: req, - }, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsNetworks(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { - var v validationError - req := struct { - Name string `json:"name,omitempty"` - IsNotificationsEnabled bool `json:"is_notifications_enabled"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - // TODO use a better way to 
validate the paired device id - pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") - name := v.checkNameNotEmpty(req.Name) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), pairedDeviceId, name, req.IsNotificationsEnabled) - if err != nil { - handleErr(w, r, err) - return - } - // TODO timestamp - response := types.InternalPutUserNotificationSettingsPairedDevicesResponse{ - Data: types.NotificationPairedDevice{ - Id: pairedDeviceId, - Name: req.Name, - IsNotificationsEnabled: req.IsNotificationsEnabled, - }, - } - - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsPairedDevices(w, r) } func (h *HandlerService) InternalDeleteUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { - var v validationError - // TODO use a better way to validate the paired device id - pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), pairedDeviceId) - if err != nil { - handleErr(w, r, err) - return - } - returnNoContent(w, r) + h.PublicDeleteUserNotificationSettingsPairedDevices(w, r) } func (h *HandlerService) InternalGetUserNotificationSettingsDashboards(w http.ResponseWriter, r *http.Request) { - var v validationError - userId, err := GetUserIdByContext(r) - if err != nil { - handleErr(w, r, err) - return - } - q := r.URL.Query() - pagingParams := v.checkPagingParams(q) - sort := checkSort[enums.NotificationSettingsDashboardColumn](&v, q.Get("sort")) - if v.hasErrors() { - handleErr(w, r, v) - return - } - data, paging, err := h.dai.GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) - if err != nil { - handleErr(w, r, err) - return - } - response := 
types.InternalGetUserNotificationSettingsDashboardsResponse{ - Data: data, - Paging: *paging, - } - returnOk(w, r, response) + h.PublicGetUserNotificationSettingsDashboards(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsValidatorDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - var req types.NotificationSettingsValidatorDashboard - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - checkMinMax(&v, req.GroupOfflineThreshold, 0, 1, "group_offline_threshold") - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) - groupId := v.checkExistingGroupId(vars["group_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - err := h.dai.UpdateNotificationSettingsValidatorDashboard(r.Context(), dashboardId, groupId, req) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsValidatorDashboardResponse{ - Data: req, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsValidatorDashboard(w, r) } func (h *HandlerService) InternalPutUserNotificationSettingsAccountDashboard(w http.ResponseWriter, r *http.Request) { - var v validationError - req := struct { - WebhookUrl string `json:"webhook_url"` - IsWebhookDiscordEnabled bool `json:"is_webhook_discord_enabled"` - IsIgnoreSpamTransactionsEnabled bool `json:"is_ignore_spam_transactions_enabled"` - SubscribedChainIds []intOrString `json:"subscribed_chain_ids"` - - IsIncomingTransactionsSubscribed bool `json:"is_incoming_transactions_subscribed"` - IsOutgoingTransactionsSubscribed bool `json:"is_outgoing_transactions_subscribed"` - IsERC20TokenTransfersSubscribed bool `json:"is_erc20_token_transfers_subscribed"` - ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold"` // 0 does not disable, is_erc20_token_transfers_subscribed determines if it's enabled - IsERC721TokenTransfersSubscribed bool 
`json:"is_erc721_token_transfers_subscribed"` - IsERC1155TokenTransfersSubscribed bool `json:"is_erc1155_token_transfers_subscribed"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - chainIdMap := v.checkNetworkSlice(req.SubscribedChainIds) - // convert to uint64[] slice - chainIds := make([]uint64, len(chainIdMap)) - i := 0 - for k := range chainIdMap { - chainIds[i] = k - i++ - } - checkMinMax(&v, req.ERC20TokenTransfersValueThreshold, 0, math.MaxFloat64, "group_offline_threshold") - vars := mux.Vars(r) - dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) - groupId := v.checkExistingGroupId(vars["group_id"]) - if v.hasErrors() { - handleErr(w, r, v) - return - } - settings := types.NotificationSettingsAccountDashboard{ - WebhookUrl: req.WebhookUrl, - IsWebhookDiscordEnabled: req.IsWebhookDiscordEnabled, - IsIgnoreSpamTransactionsEnabled: req.IsIgnoreSpamTransactionsEnabled, - SubscribedChainIds: chainIds, - - IsIncomingTransactionsSubscribed: req.IsIncomingTransactionsSubscribed, - IsOutgoingTransactionsSubscribed: req.IsOutgoingTransactionsSubscribed, - IsERC20TokenTransfersSubscribed: req.IsERC20TokenTransfersSubscribed, - ERC20TokenTransfersValueThreshold: req.ERC20TokenTransfersValueThreshold, - IsERC721TokenTransfersSubscribed: req.IsERC721TokenTransfersSubscribed, - IsERC1155TokenTransfersSubscribed: req.IsERC1155TokenTransfersSubscribed, - } - err := h.dai.UpdateNotificationSettingsAccountDashboard(r.Context(), dashboardId, groupId, settings) - if err != nil { - handleErr(w, r, err) - return - } - response := types.InternalPutUserNotificationSettingsAccountDashboardResponse{ - Data: settings, - } - returnOk(w, r, response) + h.PublicPutUserNotificationSettingsAccountDashboard(w, r) } func (h *HandlerService) InternalPostUserNotificationsTestEmail(w http.ResponseWriter, r *http.Request) { - // TODO - returnOk(w, r, nil) + h.PublicPostUserNotificationsTestEmail(w, r) } func (h *HandlerService) 
InternalPostUserNotificationsTestPush(w http.ResponseWriter, r *http.Request) { - // TODO - returnOk(w, r, nil) + h.PublicPostUserNotificationsTestPush(w, r) } func (h *HandlerService) InternalPostUserNotificationsTestWebhook(w http.ResponseWriter, r *http.Request) { - var v validationError - req := struct { - WebhookUrl string `json:"webhook_url"` - IsDiscordWebhookEnabled bool `json:"is_discord_webhook_enabled,omitempty"` - }{} - if err := v.checkBody(&req, r); err != nil { - handleErr(w, r, err) - return - } - if v.hasErrors() { - handleErr(w, r, v) - return - } - // TODO - returnOk(w, r, nil) + h.PublicPostUserNotificationsTestWebhook(w, r) } // -------------------------------------- diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index 95df23ea7..a31ad6bb2 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math" "net/http" "reflect" "time" @@ -36,6 +37,8 @@ import ( // @in query // @name api_key +// @Validator Dashboard Management.n + func (h *HandlerService) PublicGetHealthz(w http.ResponseWriter, r *http.Request) { var v validationError showAll := v.checkBool(r.URL.Query().Get("show_all"), "show_all") @@ -135,7 +138,7 @@ func (h *HandlerService) PublicPutAccountDashboardTransactionsSettings(w http.Re // // @Description Create a new validator dashboard. **Note**: New dashboards will automatically have a default group created. // @Security ApiKeyInHeader || ApiKeyInQuery -// @Tags Validator Dashboard +// @Tags Validator Dashboard Management // @Accept json // @Produce json // @Param request body handlers.PublicPostValidatorDashboards.request true "`name`: Specify the name of the dashboard.
`network`: Specify the network for the dashboard. Possible options are:
  • `ethereum`
  • `gnosis`
" @@ -621,7 +624,6 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." // @Param group_id query string false "The ID of the group." -// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." // @Param limit query string false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(index, public_key, balance, status, withdrawal_credentials) // @Param search query string false "Search for Address, ENS." @@ -662,8 +664,8 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param request body handlers.PublicDeleteValidatorDashboardValidators.request true "`validators`: Provide an array of validator indices or public keys that should get removed from the dashboard." +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param request body handlers.PublicDeleteValidatorDashboardValidators.request true "`validators`: Provide an array of validator indices or public keys that should get removed from the dashboard." // @Success 204 "Validators removed successfully." 
// @Failure 400 {object} types.ApiErrorResponse // @Router /validator-dashboards/{dashboard_id}/validators/bulk-deletions [post] @@ -857,7 +859,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW // @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "request" // @Success 200 {object} types.ApiDataResponse[types.VDBPostArchivingReturnData] // @Failure 400 {object} types.ApiErrorResponse -// @Conflict 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." +// @Conflict 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." // @Router /validator-dashboards/{dashboard_id}/archiving [put] func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { var v validationError @@ -1821,7 +1823,7 @@ func (h *HandlerService) PublicGetValidatorDashboardNodeRocketPool(w http.Respon // @Param node_address path string true "The address of the node." // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." // @Param limit query string false "The maximum number of results that may be returned." -// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order. Possible values are TODO." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(group_id) // @Param search query string false "Search for Index, Node." 
// @Success 200 {object} types.GetValidatorDashboardRocketPoolMinipoolsResponse // @Failure 400 {object} types.ApiErrorResponse @@ -1856,6 +1858,693 @@ func (h *HandlerService) PublicGetValidatorDashboardRocketPoolMinipools(w http.R returnOk(w, r, response) } +// ---------------------------------------------- +// Notifications +// ---------------------------------------------- + +// PublicGetUserNotifications godoc +// +// @Description Get an overview of your recent notifications. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Success 200 {object} types.InternalGetUserNotificationsResponse +// @Router /users/me/notifications [get] +func (h *HandlerService) PublicGetUserNotifications(w http.ResponseWriter, r *http.Request) { + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + data, err := h.dai.GetNotificationOverview(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationsResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationDashboards godoc +// +// @Description Get a list of triggered notifications related to your dashboards. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param network query string false "If set, results will be filtered to only include networks given. Provide a comma separated list." +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
" Enums(chain_id, timestamp, dashboard_id) +// @Param search query string false "Search for Dashboard, Group" +// @Success 200 {object} types.InternalGetUserNotificationDashboardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/dashboards [get] +func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationDashboardsColumn](&v, q.Get("sort")) + chainId := v.checkNetworkParameter(q.Get("network")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetDashboardNotifications(r.Context(), userId, chainId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationDashboardsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationValidators godoc +// +// @Description Get a detailed view of a triggered notification related to a validator dashboard group at a specific epoch. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." +// @Param epoch path string true "The epoch of the notification." 
+// @Success 200 {object} types.InternalGetUserNotificationsValidatorDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/validator-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] +func (h *HandlerService) PublicGetUserNotificationsValidatorDashboard(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) + groupId := v.checkExistingGroupId(vars["group_id"]) + epoch := v.checkUint(vars["epoch"], "epoch") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.dai.GetValidatorDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationsValidatorDashboardResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationsAccountDashboard godoc +// +// @Description Get a detailed view of a triggered notification related to an account dashboard group at a specific epoch. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." +// @Param epoch path string true "The epoch of the notification." 
+// @Success 200 {object} types.InternalGetUserNotificationsAccountDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/account-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] +func (h *HandlerService) PublicGetUserNotificationsAccountDashboard(w http.ResponseWriter, r *http.Request) { + var v validationError + vars := mux.Vars(r) + dashboardId := v.checkUint(vars["dashboard_id"], "dashboard_id") + groupId := v.checkExistingGroupId(vars["group_id"]) + epoch := v.checkUint(vars["epoch"], "epoch") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.dai.GetAccountDashboardNotificationDetails(r.Context(), dashboardId, groupId, epoch) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationsAccountDashboardResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationMachines godoc +// +// @Description Get a list of triggered notifications related to your machines. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(machine_name, threshold, event_type, timestamp) +// @Param search query string false "Search for Machine" +// @Success 200 {object} types.InternalGetUserNotificationMachinesResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/machines [get] +func (h *HandlerService) PublicGetUserNotificationMachines(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationMachinesColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetMachineNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationMachinesResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationClients godoc +// +// @Description Get a list of triggered notifications related to your clients. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(client_name, timestamp) +// @Param search query string false "Search for Client" +// @Success 200 {object} types.InternalGetUserNotificationClientsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/clients [get] +func (h *HandlerService) PublicGetUserNotificationClients(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationClientsColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetClientNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationClientsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationRocketPool godoc +// +// @Description Get a list of triggered notifications related to Rocket Pool. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(timestamp, event_type, node_address) +// @Param search query string false "Search for TODO" +// @Success 200 {object} types.InternalGetUserNotificationRocketPoolResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/rocket-pool [get] +func (h *HandlerService) PublicGetUserNotificationRocketPool(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationRocketPoolColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetRocketPoolNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationRocketPoolResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationNetworks godoc +// +// @Description Get a list of triggered notifications related to networks. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notifications +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(timestamp, event_type) +// @Param search query string false "Search for TODO" +// @Success 200 {object} types.InternalGetUserNotificationNetworksResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/networks [get] +func (h *HandlerService) PublicGetUserNotificationNetworks(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationNetworksColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetNetworkNotifications(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationNetworksResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicGetUserNotificationPairedDevices godoc +// +// @Description Get notification settings for the authenticated user. Excludes dashboard notification settings. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Success 200 {object} types.InternalGetUserNotificationSettingsResponse +// @Router /users/me/notifications/settings [get] +func (h *HandlerService) PublicGetUserNotificationSettings(w http.ResponseWriter, r *http.Request) { + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + data, err := h.dai.GetNotificationSettings(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationSettingsResponse{ + Data: *data, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsGeneral godoc +// +// @Description Update general notification settings for the authenticated user. 
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param request body types.NotificationSettingsGeneral true "Notification settings" +// @Success 200 {object} types.InternalPutUserNotificationSettingsGeneralResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/general [put] +func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + var req types.NotificationSettingsGeneral + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + checkMinMax(&v, req.MachineStorageUsageThreshold, 0, 1, "machine_storage_usage_threshold") + checkMinMax(&v, req.MachineCpuUsageThreshold, 0, 1, "machine_cpu_usage_threshold") + checkMinMax(&v, req.MachineMemoryUsageThreshold, 0, 1, "machine_memory_usage_threshold") + checkMinMax(&v, req.RocketPoolMaxCollateralThreshold, 0, 1, "rocket_pool_max_collateral_threshold") + checkMinMax(&v, req.RocketPoolMinCollateralThreshold, 0, 1, "rocket_pool_min_collateral_threshold") + // TODO: check validity of clients + if v.hasErrors() { + handleErr(w, r, v) + return + } + err = h.dai.UpdateNotificationSettingsGeneral(r.Context(), userId, req) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsGeneralResponse{ + Data: req, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsNetworks godoc +// +// @Description Update network notification settings for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param network path string true "The networks name or chain ID." 
+// @Param request body types.NotificationSettingsNetwork true "Notification settings" +// @Success 200 {object} types.InternalPutUserNotificationSettingsNetworksResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/networks/{network} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + var req types.NotificationSettingsNetwork + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + checkMinMax(&v, req.ParticipationRateThreshold, 0, 1, "participation_rate_threshold") + + chainId := v.checkNetworkParameter(mux.Vars(r)["network"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + err = h.dai.UpdateNotificationSettingsNetworks(r.Context(), userId, chainId, req) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsNetworksResponse{ + Data: types.NotificationNetwork{ + ChainId: chainId, + Settings: req, + }, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsPairedDevices godoc +// +// @Description Update paired device notification settings for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param paired_device_id path string true "The paired device ID." 
+// @Param request body handlers.PublicPutUserNotificationSettingsPairedDevices.request true "Notification settings" +// @Success 200 {object} types.InternalPutUserNotificationSettingsPairedDevicesResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { + var v validationError + type request struct { + Name string `json:"name,omitempty"` + IsNotificationsEnabled bool `json:"is_notifications_enabled"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + // TODO use a better way to validate the paired device id + pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") + name := v.checkNameNotEmpty(req.Name) + if v.hasErrors() { + handleErr(w, r, v) + return + } + err := h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), pairedDeviceId, name, req.IsNotificationsEnabled) + if err != nil { + handleErr(w, r, err) + return + } + // TODO timestamp + response := types.InternalPutUserNotificationSettingsPairedDevicesResponse{ + Data: types.NotificationPairedDevice{ + Id: pairedDeviceId, + Name: req.Name, + IsNotificationsEnabled: req.IsNotificationsEnabled, + }, + } + + returnOk(w, r, response) +} + +// PublicDeleteUserNotificationSettingsPairedDevices godoc +// +// @Description Delete paired device notification settings for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Param paired_device_id path string true "The paired device ID." 
+// @Success 204 +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [delete] +func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { + var v validationError + // TODO use a better way to validate the paired device id + pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") + if v.hasErrors() { + handleErr(w, r, v) + return + } + err := h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), pairedDeviceId) + if err != nil { + handleErr(w, r, err) + return + } + returnNoContent(w, r) +} + +// PublicGetUserNotificationSettingsDashboards godoc +// +// @Description Get a list of notification settings for the dashboards of the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." +// @Param limit query string false "The maximum number of results that may be returned." +// @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums (dashboard_id, group_name) +// @Param search query string false "Search for Dashboard, Group" +// @Success 200 {object} types.InternalGetUserNotificationSettingsDashboardsResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/dashboards [get] +func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + q := r.URL.Query() + pagingParams := v.checkPagingParams(q) + sort := checkSort[enums.NotificationSettingsDashboardColumn](&v, q.Get("sort")) + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, paging, err := h.dai.GetNotificationSettingsDashboards(r.Context(), userId, pagingParams.cursor, *sort, pagingParams.search, pagingParams.limit) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetUserNotificationSettingsDashboardsResponse{ + Data: data, + Paging: *paging, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsValidatorDashboard godoc +// +// @Description Update the notification settings for a specific group of a validator dashboard for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." 
+// @Param request body types.NotificationSettingsValidatorDashboard true "Notification settings" +// @Success 200 {object} types.InternalPutUserNotificationSettingsValidatorDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/validator-dashboards/{dashboard_id}/groups/{group_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w http.ResponseWriter, r *http.Request) { + var v validationError + var req types.NotificationSettingsValidatorDashboard + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + checkMinMax(&v, req.GroupOfflineThreshold, 0, 1, "group_offline_threshold") + vars := mux.Vars(r) + dashboardId := v.checkPrimaryDashboardId(vars["dashboard_id"]) + groupId := v.checkExistingGroupId(vars["group_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + err := h.dai.UpdateNotificationSettingsValidatorDashboard(r.Context(), dashboardId, groupId, req) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsValidatorDashboardResponse{ + Data: req, + } + returnOk(w, r, response) +} + +// PublicPutUserNotificationSettingsAccountDashboard godoc +// +// @Description Update the notification settings for a specific group of an account dashboard for the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param dashboard_id path string true "The ID of the dashboard." +// @Param group_id path string true "The ID of the group." 
+// @Param request body handlers.PublicPutUserNotificationSettingsAccountDashboard.request true "Notification settings" +// @Success 200 {object} types.InternalPutUserNotificationSettingsAccountDashboardResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/account-dashboards/{dashboard_id}/groups/{group_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsAccountDashboard(w http.ResponseWriter, r *http.Request) { + var v validationError + // uses a different struct due to `subscribed_chain_ids`, which is a slice of intOrString in the payload but a slice of uint64 in the response + type request struct { + WebhookUrl string `json:"webhook_url"` + IsWebhookDiscordEnabled bool `json:"is_webhook_discord_enabled"` + IsIgnoreSpamTransactionsEnabled bool `json:"is_ignore_spam_transactions_enabled"` + SubscribedChainIds []intOrString `json:"subscribed_chain_ids"` + + IsIncomingTransactionsSubscribed bool `json:"is_incoming_transactions_subscribed"` + IsOutgoingTransactionsSubscribed bool `json:"is_outgoing_transactions_subscribed"` + IsERC20TokenTransfersSubscribed bool `json:"is_erc20_token_transfers_subscribed"` + ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold"` // 0 does not disable, is_erc20_token_transfers_subscribed determines if it's enabled + IsERC721TokenTransfersSubscribed bool `json:"is_erc721_token_transfers_subscribed"` + IsERC1155TokenTransfersSubscribed bool `json:"is_erc1155_token_transfers_subscribed"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + chainIdMap := v.checkNetworkSlice(req.SubscribedChainIds) + // convert to uint64[] slice + chainIds := make([]uint64, len(chainIdMap)) + i := 0 + for k := range chainIdMap { + chainIds[i] = k + i++ + } + checkMinMax(&v, req.ERC20TokenTransfersValueThreshold, 0, math.MaxFloat64, "group_offline_threshold") + vars := mux.Vars(r) + dashboardId := 
v.checkPrimaryDashboardId(vars["dashboard_id"]) + groupId := v.checkExistingGroupId(vars["group_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + settings := types.NotificationSettingsAccountDashboard{ + WebhookUrl: req.WebhookUrl, + IsWebhookDiscordEnabled: req.IsWebhookDiscordEnabled, + IsIgnoreSpamTransactionsEnabled: req.IsIgnoreSpamTransactionsEnabled, + SubscribedChainIds: chainIds, + + IsIncomingTransactionsSubscribed: req.IsIncomingTransactionsSubscribed, + IsOutgoingTransactionsSubscribed: req.IsOutgoingTransactionsSubscribed, + IsERC20TokenTransfersSubscribed: req.IsERC20TokenTransfersSubscribed, + ERC20TokenTransfersValueThreshold: req.ERC20TokenTransfersValueThreshold, + IsERC721TokenTransfersSubscribed: req.IsERC721TokenTransfersSubscribed, + IsERC1155TokenTransfersSubscribed: req.IsERC1155TokenTransfersSubscribed, + } + err := h.dai.UpdateNotificationSettingsAccountDashboard(r.Context(), dashboardId, groupId, settings) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsAccountDashboardResponse{ + Data: settings, + } + returnOk(w, r, response) +} + +// PublicPostUserNotificationsTestEmail godoc +// +// @Description Send a test email notification to the authenticated user. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Success 204 +// @Router /users/me/notifications/test-email [post] +func (h *HandlerService) PublicPostUserNotificationsTestEmail(w http.ResponseWriter, r *http.Request) { + // TODO + returnNoContent(w, r) +} + +// PublicPostUserNotificationsTestPush godoc +// +// @Description Send a test push notification to the authenticated user. 
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Produce json +// @Success 204 +// @Router /users/me/notifications/test-push [post] +func (h *HandlerService) PublicPostUserNotificationsTestPush(w http.ResponseWriter, r *http.Request) { + // TODO + returnNoContent(w, r) +} + +// PublicPostUserNotificationsTestWebhook godoc +// +// @Description Send a test webhook notification from the authenticated user to the given URL. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param request body handlers.PublicPostUserNotificationsTestWebhook.request true "Request" +// @Success 204 +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/test-webhook [post] +func (h *HandlerService) PublicPostUserNotificationsTestWebhook(w http.ResponseWriter, r *http.Request) { + var v validationError + type request struct { + WebhookUrl string `json:"webhook_url"` + IsDiscordWebhookEnabled bool `json:"is_discord_webhook_enabled,omitempty"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + if v.hasErrors() { + handleErr(w, r, v) + return + } + // TODO + returnNoContent(w, r) +} + func (h *HandlerService) PublicGetNetworkValidators(w http.ResponseWriter, r *http.Request) { returnOk(w, r, nil) } diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 5338b5ccd..3238257bd 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -317,35 +317,36 @@ func addNotificationRoutes(hs *handlers.HandlerService, publicRouter, internalRo publicNotificationRouter.Use(hs.ManageViaApiCheckMiddleware) } endpoints := []endpoint{ - {http.MethodGet, "", nil, hs.InternalGetUserNotifications}, - {http.MethodGet, "/dashboards", nil, hs.InternalGetUserNotificationDashboards}, - {http.MethodGet, "/validator-dashboards/{notification_id}", nil, hs.InternalGetUserNotificationsValidatorDashboard}, - 
{http.MethodGet, "/account-dashboards/{notification_id}", nil, hs.InternalGetUserNotificationsAccountDashboard}, - {http.MethodGet, "/machines", nil, hs.InternalGetUserNotificationMachines}, - {http.MethodGet, "/clients", nil, hs.InternalGetUserNotificationClients}, - {http.MethodGet, "/rocket-pool", nil, hs.InternalGetUserNotificationRocketPool}, - {http.MethodGet, "/networks", nil, hs.InternalGetUserNotificationNetworks}, - {http.MethodGet, "/settings", nil, hs.InternalGetUserNotificationSettings}, - {http.MethodPut, "/settings/general", nil, hs.InternalPutUserNotificationSettingsGeneral}, - {http.MethodPut, "/settings/networks/{network}", nil, hs.InternalPutUserNotificationSettingsNetworks}, - {http.MethodPut, "/settings/paired-devices/{paired_device_id}", nil, hs.InternalPutUserNotificationSettingsPairedDevices}, - {http.MethodDelete, "/settings/paired-devices/{paired_device_id}", nil, hs.InternalDeleteUserNotificationSettingsPairedDevices}, - {http.MethodGet, "/settings/dashboards", nil, hs.InternalGetUserNotificationSettingsDashboards}, - {http.MethodPost, "/test-email", nil, hs.InternalPostUserNotificationsTestEmail}, - {http.MethodPost, "/test-push", nil, hs.InternalPostUserNotificationsTestPush}, - {http.MethodPost, "/test-webhook", nil, hs.InternalPostUserNotificationsTestWebhook}, + {http.MethodGet, "", hs.PublicGetUserNotifications, hs.InternalGetUserNotifications}, + {http.MethodGet, "/dashboards", hs.PublicGetUserNotificationDashboards, hs.InternalGetUserNotificationDashboards}, + {http.MethodGet, "/machines", hs.PublicGetUserNotificationMachines, hs.InternalGetUserNotificationMachines}, + {http.MethodGet, "/clients", hs.PublicGetUserNotificationClients, hs.InternalGetUserNotificationClients}, + {http.MethodGet, "/rocket-pool", hs.PublicGetUserNotificationRocketPool, hs.InternalGetUserNotificationRocketPool}, + {http.MethodGet, "/networks", hs.PublicGetUserNotificationNetworks, hs.InternalGetUserNotificationNetworks}, + {http.MethodGet, "/settings", 
hs.PublicGetUserNotificationSettings, hs.InternalGetUserNotificationSettings}, + {http.MethodPut, "/settings/general", hs.PublicPutUserNotificationSettingsGeneral, hs.InternalPutUserNotificationSettingsGeneral}, + {http.MethodPut, "/settings/networks/{network}", hs.PublicPutUserNotificationSettingsNetworks, hs.InternalPutUserNotificationSettingsNetworks}, + {http.MethodPut, "/settings/paired-devices/{paired_device_id}", hs.PublicPutUserNotificationSettingsPairedDevices, hs.InternalPutUserNotificationSettingsPairedDevices}, + {http.MethodDelete, "/settings/paired-devices/{paired_device_id}", hs.PublicDeleteUserNotificationSettingsPairedDevices, hs.InternalDeleteUserNotificationSettingsPairedDevices}, + {http.MethodGet, "/settings/dashboards", hs.PublicGetUserNotificationSettingsDashboards, hs.InternalGetUserNotificationSettingsDashboards}, + {http.MethodPost, "/test-email", hs.PublicPostUserNotificationsTestEmail, hs.InternalPostUserNotificationsTestEmail}, + {http.MethodPost, "/test-push", hs.PublicPostUserNotificationsTestPush, hs.InternalPostUserNotificationsTestPush}, + {http.MethodPost, "/test-webhook", hs.PublicPostUserNotificationsTestWebhook, hs.InternalPostUserNotificationsTestWebhook}, } addEndpointsToRouters(endpoints, publicNotificationRouter, internalNotificationRouter) publicDashboardNotificationSettingsRouter := publicNotificationRouter.NewRoute().Subrouter() internalDashboardNotificationSettingsRouter := internalNotificationRouter.NewRoute().Subrouter() + // TODO add adb auth middleware to account dashboard endpoints once they are implemented if !debug { publicDashboardNotificationSettingsRouter.Use(hs.VDBAuthMiddleware) internalDashboardNotificationSettingsRouter.Use(hs.VDBAuthMiddleware) } dashboardSettingsEndpoints := []endpoint{ - {http.MethodPut, "/settings/validator-dashboards/{dashboard_id}/groups/{group_id}", nil, hs.InternalPutUserNotificationSettingsValidatorDashboard}, - {http.MethodPut, 
"/settings/account-dashboards/{dashboard_id}/groups/{group_id}", nil, hs.InternalPutUserNotificationSettingsAccountDashboard}, + {http.MethodGet, "/validator-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch}", hs.PublicGetUserNotificationsValidatorDashboard, hs.InternalGetUserNotificationsValidatorDashboard}, + {http.MethodGet, "/account-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch}", hs.PublicGetUserNotificationsAccountDashboard, hs.InternalGetUserNotificationsAccountDashboard}, + {http.MethodPut, "/settings/validator-dashboards/{dashboard_id}/groups/{group_id}", hs.PublicPutUserNotificationSettingsValidatorDashboard, hs.InternalPutUserNotificationSettingsValidatorDashboard}, + {http.MethodPut, "/settings/account-dashboards/{dashboard_id}/groups/{group_id}", hs.PublicPutUserNotificationSettingsAccountDashboard, hs.InternalPutUserNotificationSettingsAccountDashboard}, } addEndpointsToRouters(dashboardSettingsEndpoints, publicDashboardNotificationSettingsRouter, internalDashboardNotificationSettingsRouter) } From 6779fe4e8cd3878215d460dbcde29bb9eada120a Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Wed, 18 Sep 2024 12:31:50 +0200 Subject: [PATCH 133/187] (BEDS-487) pass user id when updating paired devices (#865) --- backend/pkg/api/data_access/dummy.go | 4 ++-- backend/pkg/api/data_access/notifications.go | 12 ++++++------ backend/pkg/api/handlers/public.go | 14 ++++++++++++-- frontend/types/api/archiver.ts | 16 ---------------- frontend/types/api/validator_dashboard.ts | 2 +- 5 files changed, 21 insertions(+), 27 deletions(-) delete mode 100644 frontend/types/api/archiver.ts diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 1dc80125d..7aef7bc48 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -482,10 +482,10 @@ func (d *DummyService) UpdateNotificationSettingsGeneral(ctx context.Context, us func (d 
*DummyService) UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error { return nil } -func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { +func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { return nil } -func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string) error { +func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { return nil } func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 94530d441..5389847db 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -23,8 +23,8 @@ type NotificationsRepository interface { GetNotificationSettings(ctx context.Context, userId uint64) (*t.NotificationSettings, error) UpdateNotificationSettingsGeneral(ctx context.Context, userId uint64, settings t.NotificationSettingsGeneral) error UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error - UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string, name string, IsNotificationsEnabled bool) error - DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string) error + UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name 
string, IsNotificationsEnabled bool) error + DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error UpdateNotificationSettingsAccountDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error @@ -66,11 +66,11 @@ func (d *DataAccessService) UpdateNotificationSettingsGeneral(ctx context.Contex func (d *DataAccessService) UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error { return d.dummy.UpdateNotificationSettingsNetworks(ctx, userId, chainId, settings) } -func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { - return d.dummy.UpdateNotificationSettingsPairedDevice(ctx, pairedDeviceId, name, IsNotificationsEnabled) +func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error { + return d.dummy.UpdateNotificationSettingsPairedDevice(ctx, userId, pairedDeviceId, name, IsNotificationsEnabled) } -func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, pairedDeviceId string) error { - return d.dummy.DeleteNotificationSettingsPairedDevice(ctx, pairedDeviceId) +func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { + return 
d.dummy.DeleteNotificationSettingsPairedDevice(ctx, userId, pairedDeviceId) } func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { return d.dummy.GetNotificationSettingsDashboards(ctx, userId, cursor, colSort, search, limit) diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index a31ad6bb2..b3ffd15dd 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -2278,6 +2278,11 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon // @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [put] func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } type request struct { Name string `json:"name,omitempty"` IsNotificationsEnabled bool `json:"is_notifications_enabled"` @@ -2294,7 +2299,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.R handleErr(w, r, v) return } - err := h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), pairedDeviceId, name, req.IsNotificationsEnabled) + err = h.dai.UpdateNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId, name, req.IsNotificationsEnabled) if err != nil { handleErr(w, r, err) return @@ -2323,13 +2328,18 @@ func (h *HandlerService) PublicPutUserNotificationSettingsPairedDevices(w http.R // @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [delete] func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w http.ResponseWriter, r *http.Request) { var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, 
err) + return + } // TODO use a better way to validate the paired device id pairedDeviceId := v.checkRegex(reNonEmpty, mux.Vars(r)["paired_device_id"], "paired_device_id") if v.hasErrors() { handleErr(w, r, v) return } - err := h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), pairedDeviceId) + err = h.dai.DeleteNotificationSettingsPairedDevice(r.Context(), userId, pairedDeviceId) if err != nil { handleErr(w, r, err) return diff --git a/frontend/types/api/archiver.ts b/frontend/types/api/archiver.ts deleted file mode 100644 index a0ecfe481..000000000 --- a/frontend/types/api/archiver.ts +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by tygo. DO NOT EDIT. -/* eslint-disable */ - -////////// -// source: archiver.go - -export interface ArchiverDashboard { - DashboardId: number /* uint64 */; - IsArchived: boolean; - GroupCount: number /* uint64 */; - ValidatorCount: number /* uint64 */; -} -export interface ArchiverDashboardArchiveReason { - DashboardId: number /* uint64 */; - ArchivedReason: any /* enums.VDBArchivedReason */; -} diff --git a/frontend/types/api/validator_dashboard.ts b/frontend/types/api/validator_dashboard.ts index 10fb9d3e1..03f07f548 100644 --- a/frontend/types/api/validator_dashboard.ts +++ b/frontend/types/api/validator_dashboard.ts @@ -160,8 +160,8 @@ export interface VDBBlocksTableRow { group_id: number /* uint64 */; epoch: number /* uint64 */; slot: number /* uint64 */; - status: 'success' | 'missed' | 'orphaned' | 'scheduled'; block?: number /* uint64 */; + status: 'success' | 'missed' | 'orphaned' | 'scheduled'; reward_recipient?: Address; reward?: ClElValue; graffiti?: string; From 49359ba42b76807335ace7275843885e67760ed2 Mon Sep 17 00:00:00 2001 From: remoterami <142154971+remoterami@users.noreply.github.com> Date: Wed, 18 Sep 2024 12:39:12 +0200 Subject: [PATCH 134/187] (BEDS-155) Data Access: adjusted address struct (#733) * adjusted address struct * adding contract data (WIP) * added contract status * retrieving address 
details (label, ens, ...) from single method * writing labels to result, migration fixed * CR feedback * renamed tags to names --- backend/pkg/api/data_access/general.go | 48 ++++++++ backend/pkg/api/data_access/vdb_blocks.go | 27 ++++- backend/pkg/api/data_access/vdb_deposits.go | 41 ++++++- .../pkg/api/data_access/vdb_withdrawals.go | 65 ++++++++--- backend/pkg/api/types/common.go | 6 +- backend/pkg/commons/db/bigtable_eth1.go | 106 +++++++++--------- backend/pkg/commons/db/ens.go | 10 +- .../20240822134034_add_address_tags.sql | 15 +++ frontend/types/api/common.ts | 2 + 9 files changed, 239 insertions(+), 81 deletions(-) create mode 100644 backend/pkg/api/data_access/general.go create mode 100644 backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql diff --git a/backend/pkg/api/data_access/general.go b/backend/pkg/api/data_access/general.go new file mode 100644 index 000000000..7debc5dfc --- /dev/null +++ b/backend/pkg/api/data_access/general.go @@ -0,0 +1,48 @@ +package dataaccess + +import ( + "context" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/gobitfly/beaconchain/pkg/api/types" + "github.com/gobitfly/beaconchain/pkg/commons/db" +) + +// retrieve (primary) ens name and optional name (=label) maintained by beaconcha.in, if present +func (d *DataAccessService) GetNamesAndEnsForAddresses(ctx context.Context, addressMap map[string]*types.Address) error { + addresses := make([][]byte, 0, len(addressMap)) + ensMapping := make(map[string]string, len(addressMap)) + for address, data := range addressMap { + ensMapping[address] = "" + add, err := hexutil.Decode(address) + if err != nil { + return err + } + addresses = append(addresses, add) + if data == nil { + addressMap[address] = &types.Address{Hash: types.Hash(address)} + } + } + // determine ENS names + if err := db.GetEnsNamesForAddresses(ensMapping); err != nil { + return err + } + for address, ens := range ensMapping { + addressMap[address].Ens = ens + } + + // 
determine names + names := []struct { + Address []byte `db:"address"` + Name string `db:"name"` + }{} + err := d.alloyReader.SelectContext(ctx, &names, `SELECT address, name FROM address_names WHERE address = ANY($1)`, addresses) + if err != nil { + return err + } + + for _, name := range names { + addressMap[hexutil.Encode(name.Address)].Label = name.Name + } + return nil +} diff --git a/backend/pkg/api/data_access/vdb_blocks.go b/backend/pkg/api/data_access/vdb_blocks.go index 3780599b7..2a2a0953a 100644 --- a/backend/pkg/api/data_access/vdb_blocks.go +++ b/backend/pkg/api/data_access/vdb_blocks.go @@ -15,6 +15,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" "github.com/gobitfly/beaconchain/pkg/commons/log" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/shopspring/decimal" ) @@ -345,7 +346,8 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das } data := make([]t.VDBBlocksTableRow, len(proposals)) - ensMapping := make(map[string]string) + addressMapping := make(map[string]*t.Address) + contractStatusRequests := make([]db.ContractInteractionAtRequest, 0, len(proposals)) for i, proposal := range proposals { data[i].GroupId = proposal.Group if dashboardId.AggregateGroups { @@ -382,7 +384,13 @@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das Hash: t.Hash(hexutil.Encode(proposal.FeeRecipient)), } data[i].RewardRecipient = &rewardRecp - ensMapping[hexutil.Encode(proposal.FeeRecipient)] = "" + addressMapping[hexutil.Encode(proposal.FeeRecipient)] = nil + contractStatusRequests = append(contractStatusRequests, db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", proposal.FeeRecipient), + Block: proposal.Block.Int64, + TxIdx: -1, + TraceIdx: -1, + }) reward.El = proposal.ElReward.Decimal.Mul(decimal.NewFromInt(1e18)) } if proposal.ClReward.Valid { @@ -393,13 +401,22 
@@ func (d *DataAccessService) GetValidatorDashboardBlocks(ctx context.Context, das } // determine reward recipient ENS names startTime = time.Now() - if err := db.GetEnsNamesForAddresses(ensMapping); err != nil { + // determine ens/names + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { return nil, nil, err } - log.Debugf("=== getting ens names took %s", time.Since(startTime)) + log.Debugf("=== getting ens + labels names took %s", time.Since(startTime)) + // determine contract statuses + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { + return nil, nil, err + } + var contractIdx int for i := range data { if data[i].RewardRecipient != nil { - data[i].RewardRecipient.Ens = ensMapping[string(data[i].RewardRecipient.Hash)] + data[i].RewardRecipient = addressMapping[string(data[i].RewardRecipient.Hash)] + data[i].RewardRecipient.IsContract = contractStatuses[contractIdx] == types.CONTRACT_CREATION || contractStatuses[contractIdx] == types.CONTRACT_PRESENT + contractIdx += 1 } } if !moreDataFlag && !currentCursor.IsValid() { diff --git a/backend/pkg/api/data_access/vdb_deposits.go b/backend/pkg/api/data_access/vdb_deposits.go index 7d32cc002..8c1d49003 100644 --- a/backend/pkg/api/data_access/vdb_deposits.go +++ b/backend/pkg/api/data_access/vdb_deposits.go @@ -14,6 +14,7 @@ import ( "github.com/gobitfly/beaconchain/pkg/api/enums" t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" "github.com/shopspring/decimal" @@ -121,17 +122,27 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, } responseData := make([]t.VDBExecutionDepositsTableRow, len(data)) + addressMapping := make(map[string]*t.Address) + fromContractStatusRequests := make([]db.ContractInteractionAtRequest, len(data)) + 
depositorContractStatusRequests := make([]db.ContractInteractionAtRequest, 0, len(data)) for i, row := range data { responseData[i] = t.VDBExecutionDepositsTableRow{ PublicKey: t.PubKey(pubkeys[i]), Block: uint64(row.BlockNumber), Timestamp: row.Timestamp.Unix(), - From: t.Address{Hash: t.Hash(hexutil.Encode(row.From))}, TxHash: t.Hash(hexutil.Encode(row.TxHash)), WithdrawalCredential: t.Hash(hexutil.Encode(row.WithdrawalCredentials)), Amount: utils.GWeiToWei(big.NewInt(row.Amount)), Valid: row.Valid, } + addressMapping[hexutil.Encode(row.From)] = nil + fromContractStatusRequests[i] = db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", row.From), + Block: row.BlockNumber, + // TODO not entirely correct, would need to determine tx index and itx index of tx. But good enough for now + TxIdx: -1, + TraceIdx: -1, + } if row.GroupId.Valid { if dashboardId.AggregateGroups { responseData[i].GroupId = t.DefaultGroupId @@ -143,6 +154,10 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, } if len(row.Depositor) > 0 { responseData[i].Depositor = t.Address{Hash: t.Hash(hexutil.Encode(row.Depositor))} + addressMapping[hexutil.Encode(row.Depositor)] = nil + depositorReq := fromContractStatusRequests[i] + depositorReq.Address = fmt.Sprintf("%x", row.Depositor) + depositorContractStatusRequests = append(depositorContractStatusRequests, depositorReq) } else { responseData[i].Depositor = responseData[i].From } @@ -150,6 +165,30 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, responseData[i].Index = &v } } + + // populate address data + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { + return nil, nil, err + } + fromContractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(fromContractStatusRequests) + if err != nil { + return nil, nil, err + } + depositorContractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(depositorContractStatusRequests) + if err != nil 
{ + return nil, nil, err + } + var depositorIdx int + for i := range data { + responseData[i].From = *addressMapping[string(responseData[i].From.Hash)] + responseData[i].From.IsContract = fromContractStatuses[i] == types.CONTRACT_CREATION || fromContractStatuses[i] == types.CONTRACT_PRESENT + responseData[i].Depositor.IsContract = responseData[i].From.IsContract + if responseData[i].Depositor.Hash != responseData[i].From.Hash { + responseData[i].Depositor.IsContract = depositorContractStatuses[depositorIdx] == types.CONTRACT_CREATION || depositorContractStatuses[depositorIdx] == types.CONTRACT_PRESENT + depositorIdx += 1 + } + } + var paging t.Paging moreDataFlag := len(responseData) > int(limit) diff --git a/backend/pkg/api/data_access/vdb_withdrawals.go b/backend/pkg/api/data_access/vdb_withdrawals.go index c3837f6f0..21a190f44 100644 --- a/backend/pkg/api/data_access/vdb_withdrawals.go +++ b/backend/pkg/api/data_access/vdb_withdrawals.go @@ -17,6 +17,7 @@ import ( t "github.com/gobitfly/beaconchain/pkg/api/types" "github.com/gobitfly/beaconchain/pkg/commons/cache" "github.com/gobitfly/beaconchain/pkg/commons/db" + "github.com/gobitfly/beaconchain/pkg/commons/types" "github.com/gobitfly/beaconchain/pkg/commons/utils" "github.com/lib/pq" "github.com/pkg/errors" @@ -111,6 +112,7 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context // Get the withdrawals for the validators queryResult := []struct { BlockSlot uint64 `db:"block_slot"` + BlockNumber uint64 `db:"exec_block_number"` WithdrawalIndex uint64 `db:"withdrawalindex"` ValidatorIndex uint64 `db:"validatorindex"` Address []byte `db:"address"` @@ -121,6 +123,7 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context withdrawalsQuery := ` SELECT w.block_slot, + b.exec_block_number, w.withdrawalindex, w.validatorindex, w.address, @@ -196,32 +199,43 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context } // Prepare the ENS map - 
addressEns := make(map[string]string) - for _, withdrawal := range queryResult { + addressMapping := make(map[string]*t.Address) + contractStatusRequests := make([]db.ContractInteractionAtRequest, len(queryResult)) + for i, withdrawal := range queryResult { address := hexutil.Encode(withdrawal.Address) - addressEns[address] = "" + addressMapping[address] = nil + contractStatusRequests[i] = db.ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", withdrawal.Address), + Block: int64(withdrawal.BlockNumber), + TxIdx: -1, + TraceIdx: -1, + } + } + + // Get the ENS names and (label) names for the addresses + if err := d.GetNamesAndEnsForAddresses(ctx, addressMapping); err != nil { + return nil, nil, err } - // Get the ENS names for the addresses - if err := db.GetEnsNamesForAddresses(addressEns); err != nil { + // Get the contract status for the addresses + contractStatuses, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusRequests) + if err != nil { return nil, nil, err } // Create the result cursorData := make([]t.WithdrawalsCursor, 0) - for _, withdrawal := range queryResult { + for i, withdrawal := range queryResult { address := hexutil.Encode(withdrawal.Address) result = append(result, t.VDBWithdrawalsTableRow{ - Epoch: withdrawal.BlockSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch, - Slot: withdrawal.BlockSlot, - Index: withdrawal.ValidatorIndex, - GroupId: validatorGroupMap[withdrawal.ValidatorIndex], - Recipient: t.Address{ - Hash: t.Hash(address), - Ens: addressEns[address], - }, - Amount: utils.GWeiToWei(big.NewInt(int64(withdrawal.Amount))), + Epoch: withdrawal.BlockSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch, + Slot: withdrawal.BlockSlot, + Index: withdrawal.ValidatorIndex, + Recipient: *addressMapping[address], + GroupId: validatorGroupMap[withdrawal.ValidatorIndex], + Amount: utils.GWeiToWei(big.NewInt(int64(withdrawal.Amount))), }) + result[i].Recipient.IsContract = contractStatuses[i] == types.CONTRACT_CREATION || 
contractStatuses[i] == types.CONTRACT_PRESENT cursorData = append(cursorData, t.WithdrawalsCursor{ Slot: withdrawal.BlockSlot, WithdrawalIndex: withdrawal.WithdrawalIndex, @@ -256,7 +270,8 @@ func (d *DataAccessService) GetValidatorDashboardWithdrawals(ctx context.Context if nextData != nil { // Complete the next data nextData.GroupId = validatorGroupMap[nextData.Index] - nextData.Recipient.Ens = addressEns[string(nextData.Recipient.Hash)] + // TODO integrate label/ens data for "next" row + // nextData.Recipient.Ens = addressEns[string(nextData.Recipient.Hash)] } else { // If there is no next data, add a missing estimate row nextData = &t.VDBWithdrawalsTableRow{ @@ -393,12 +408,28 @@ func (d *DataAccessService) getNextWithdrawalRow(queryValidators []t.VDBValidato withdrawalAmount = 0 } + ens_name, err := db.GetEnsNameForAddress(*address, utils.SlotToTime(nextWithdrawalSlot)) + if err != sql.ErrNoRows { + return nil, err + } + + contractStatusReq := []db.ContractInteractionAtRequest{{ + Address: fmt.Sprintf("%x", address), + Block: -1, + }} + contractStatus, err := d.bigtable.GetAddressContractInteractionsAt(contractStatusReq) + if err != nil { + return nil, err + } + nextData := &t.VDBWithdrawalsTableRow{ Epoch: nextWithdrawalSlot / utils.Config.Chain.ClConfig.SlotsPerEpoch, Slot: nextWithdrawalSlot, Index: *nextValidator, Recipient: t.Address{ - Hash: t.Hash(address.String()), + Hash: t.Hash(address.String()), + Ens: ens_name, + IsContract: contractStatus[0] == types.CONTRACT_CREATION || contractStatus[0] == types.CONTRACT_PRESENT, }, Amount: utils.GWeiToWei(big.NewInt(int64(withdrawalAmount))), } diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index 2c7253623..2deebb1b9 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -34,8 +34,10 @@ type PubKey string type Hash string // blocks, txs etc. 
type Address struct { - Hash Hash `json:"hash"` - Ens string `json:"ens,omitempty"` + Hash Hash `json:"hash"` + IsContract bool `json:"is_contract"` + Ens string `json:"ens,omitempty"` + Label string `json:"label,omitempty"` } type LuckItem struct { diff --git a/backend/pkg/commons/db/bigtable_eth1.go b/backend/pkg/commons/db/bigtable_eth1.go index e2fca3052..3861374f2 100644 --- a/backend/pkg/commons/db/bigtable_eth1.go +++ b/backend/pkg/commons/db/bigtable_eth1.go @@ -3138,7 +3138,7 @@ func (bigtable *Bigtable) GetAddressName(address []byte) (string, error) { add := common.Address{} add.SetBytes(address) - name, err := GetEnsNameForAddress(add) + name, err := GetEnsNameForAddress(add, time.Time{}) if err == nil && len(name) > 0 { return name, nil } @@ -3215,11 +3215,11 @@ type isContractInfo struct { ts gcp_bigtable.Timestamp } -type contractInteractionAtRequest struct { - address string - block int64 - txIdx int64 - traceIdx int64 +type ContractInteractionAtRequest struct { + Address string // expected all lowercase without 0x prefix + Block int64 + TxIdx int64 + TraceIdx int64 } func (bigtable *Bigtable) getAddressIsContractHistories(histories map[string][]isContractInfo) error { @@ -3270,7 +3270,7 @@ func (bigtable *Bigtable) getAddressIsContractHistories(histories map[string][]i // returns account state after the given execution state // -1 is latest (e.g. 
"txIdx" = -1 returns the contract state after execution of "block", "block" = -1 returns the state at chain head) -func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractInteractionAtRequest) ([]types.ContractInteractionType, error) { +func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []ContractInteractionAtRequest) ([]types.ContractInteractionType, error) { results := make([]types.ContractInteractionType, len(requests)) if len(requests) == 0 { return results, nil @@ -3279,7 +3279,7 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn // get histories histories := make(map[string][]isContractInfo, len(requests)) for _, request := range requests { - histories[request.address] = nil + histories[request.Address] = nil } err := bigtable.getAddressIsContractHistories(histories) if err != nil { @@ -3288,22 +3288,22 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn // evaluate requests; CONTRACT_NONE is default for i, request := range requests { - history, ok := histories[request.address] + history, ok := histories[request.Address] if !ok || history == nil || len(history) == 0 { continue } latestUpdateIdxBeforeReq := 0 - if request.block != -1 { + if request.Block != -1 { var block, tx, itx uint64 - if request.txIdx == -1 { - block = uint64(request.block + 1) - } else if request.traceIdx == -1 { - block = uint64(request.block) - tx = uint64(request.txIdx + 1) + if request.TxIdx == -1 { + block = uint64(request.Block + 1) + } else if request.TraceIdx == -1 { + block = uint64(request.Block) + tx = uint64(request.TxIdx + 1) } else { - block = uint64(request.block) - tx = uint64(request.txIdx) - itx = uint64(request.traceIdx + 1) + block = uint64(request.Block) + tx = uint64(request.TxIdx) + itx = uint64(request.TraceIdx + 1) } req_ts, err := encodeIsContractUpdateTs(block, tx, itx) if err != nil { @@ -3319,7 +3319,7 @@ func (bigtable *Bigtable) 
GetAddressContractInteractionsAt(requests []contractIn } b, tx, trace := decodeIsContractUpdateTs(history[latestUpdateIdxBeforeReq].ts) - exact_match := request.block == -1 || request.block == int64(b) && (request.txIdx == -1 || request.txIdx == int64(tx) && (request.traceIdx == -1 || request.traceIdx == int64(trace))) + exact_match := request.Block == -1 || request.Block == int64(b) && (request.TxIdx == -1 || request.TxIdx == int64(tx) && (request.TraceIdx == -1 || request.TraceIdx == int64(trace))) if exact_match { results[i] = types.CONTRACT_DESTRUCTION @@ -3343,17 +3343,17 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAt(requests []contractIn // convenience function to get contract interaction status per transaction of a block func (bigtable *Bigtable) GetAddressContractInteractionsAtBlock(block *types.Eth1Block) ([]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, len(block.GetTransactions())) + requests := make([]ContractInteractionAtRequest, len(block.GetTransactions())) for i, tx := range block.GetTransactions() { address := tx.GetTo() if len(address) == 0 { address = tx.GetContractAddress() } - requests[i] = contractInteractionAtRequest{ - address: fmt.Sprintf("%x", address), - block: int64(block.GetNumber()), - txIdx: int64(i), - traceIdx: -1, + requests[i] = ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", address), + Block: int64(block.GetNumber()), + TxIdx: int64(i), + TraceIdx: -1, } } @@ -3363,19 +3363,19 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAtBlock(block *types.Eth // convenience function to get contract interaction status per subtransaction of a transaction // 2nd parameter specifies [tx_idx, trace_idx] for each internal tx func (bigtable *Bigtable) GetAddressContractInteractionsAtITransactions(itransactions []*types.Eth1InternalTransactionIndexed, idxs [][2]int64) ([][2]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, 0, 
len(itransactions)*2) + requests := make([]ContractInteractionAtRequest, 0, len(itransactions)*2) for i, tx := range itransactions { - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", tx.GetFrom()), - block: int64(tx.GetBlockNumber()), - txIdx: idxs[i][0], - traceIdx: idxs[i][1], + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", tx.GetFrom()), + Block: int64(tx.GetBlockNumber()), + TxIdx: idxs[i][0], + TraceIdx: idxs[i][1], }) - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", tx.GetTo()), - block: int64(tx.GetBlockNumber()), - txIdx: idxs[i][0], - traceIdx: idxs[i][1], + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", tx.GetTo()), + Block: int64(tx.GetBlockNumber()), + TxIdx: idxs[i][0], + TraceIdx: idxs[i][1], }) } results, err := bigtable.GetAddressContractInteractionsAt(requests) @@ -3392,20 +3392,20 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAtITransactions(itransac // convenience function to get contract interaction status per parity trace func (bigtable *Bigtable) GetAddressContractInteractionsAtParityTraces(traces []*rpc.ParityTraceResult) ([][2]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, 0, len(traces)*2) + requests := make([]ContractInteractionAtRequest, 0, len(traces)*2) for i, itx := range traces { from, to, _, _ := itx.ConvertFields() - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", from), - block: int64(itx.BlockNumber), - txIdx: int64(itx.TransactionPosition), - traceIdx: int64(i), + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", from), + Block: int64(itx.BlockNumber), + TxIdx: int64(itx.TransactionPosition), + TraceIdx: int64(i), }) - requests = append(requests, contractInteractionAtRequest{ - address: fmt.Sprintf("%x", to), - block: int64(itx.BlockNumber), 
- txIdx: int64(itx.TransactionPosition), - traceIdx: int64(i), + requests = append(requests, ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", to), + Block: int64(itx.BlockNumber), + TxIdx: int64(itx.TransactionPosition), + TraceIdx: int64(i), }) } results, err := bigtable.GetAddressContractInteractionsAt(requests) @@ -3422,13 +3422,13 @@ func (bigtable *Bigtable) GetAddressContractInteractionsAtParityTraces(traces [] // convenience function to get contract interaction status per transaction func (bigtable *Bigtable) GetAddressContractInteractionsAtTransactions(transactions []*types.Eth1TransactionIndexed, idxs []int64) ([]types.ContractInteractionType, error) { - requests := make([]contractInteractionAtRequest, len(transactions)) + requests := make([]ContractInteractionAtRequest, len(transactions)) for i, tx := range transactions { - requests[i] = contractInteractionAtRequest{ - address: fmt.Sprintf("%x", tx.GetTo()), - block: int64(tx.GetBlockNumber()), - txIdx: idxs[i], - traceIdx: -1, + requests[i] = ContractInteractionAtRequest{ + Address: fmt.Sprintf("%x", tx.GetTo()), + Block: int64(tx.GetBlockNumber()), + TxIdx: idxs[i], + TraceIdx: -1, } } return bigtable.GetAddressContractInteractionsAt(requests) diff --git a/backend/pkg/commons/db/ens.go b/backend/pkg/commons/db/ens.go index 46001df56..5020f31e6 100644 --- a/backend/pkg/commons/db/ens.go +++ b/backend/pkg/commons/db/ens.go @@ -609,15 +609,19 @@ func GetAddressForEnsName(name string) (address *common.Address, err error) { return address, err } -func GetEnsNameForAddress(address common.Address) (name string, err error) { +// pass invalid time to get latest data +func GetEnsNameForAddress(address common.Address, validUntil time.Time) (name string, err error) { + if validUntil.IsZero() { + validUntil = time.Now() + } err = ReaderDb.Get(&name, ` SELECT ens_name FROM ens WHERE address = $1 AND is_primary_name AND - valid_to >= now() - ;`, address.Bytes()) + valid_to >= $2 + ;`, address.Bytes(), 
validUntil) return name, err } diff --git a/backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql b/backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql new file mode 100644 index 000000000..3579f7ef6 --- /dev/null +++ b/backend/pkg/commons/db/migrations/postgres/20240822134034_add_address_tags.sql @@ -0,0 +1,15 @@ +-- +goose Up +-- +goose StatementBegin +SELECT('up SQL query - create address_names table'); +CREATE TABLE IF NOT EXISTS address_names ( + address bytea NOT NULL UNIQUE, + name TEXT NOT NULL, + PRIMARY KEY (address, name) +); +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin +SELECT('down SQL query - drop address_names table'); +DROP TABLE IF EXISTS address_names; +-- +goose StatementEnd diff --git a/frontend/types/api/common.ts b/frontend/types/api/common.ts index ecce7231e..3c8c9506a 100644 --- a/frontend/types/api/common.ts +++ b/frontend/types/api/common.ts @@ -27,7 +27,9 @@ export type PubKey = string; export type Hash = string; // blocks, txs etc. 
export interface Address { hash: Hash; + is_contract: boolean; ens?: string; + label?: string; } export interface LuckItem { percent: number /* float64 */; From c55824620a7a396b8cc637d751cda4a64ff6f31a Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 18 Sep 2024 12:50:22 +0200 Subject: [PATCH 135/187] fix(eth1indexer): fix handling of specific error (#687) (BEDS-90) * eth1indexer: less logging on ens * eth1indexer: fix handling of specific error --- backend/pkg/commons/db/ens.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/backend/pkg/commons/db/ens.go b/backend/pkg/commons/db/ens.go index 5020f31e6..119f6c00b 100644 --- a/backend/pkg/commons/db/ens.go +++ b/backend/pkg/commons/db/ens.go @@ -436,7 +436,7 @@ func validateEnsAddress(client *ethclient.Client, address common.Address, alread err.Error() == "no resolution" || err.Error() == "execution reverted" || strings.HasPrefix(err.Error(), "name is not valid") { - log.Warnf("reverse resolving address [%v] resulted in a skippable error [%s], skipping it", address, err.Error()) + // log.Warnf("reverse resolving address [%v] resulted in a skippable error [%s], skipping it", address, err.Error()) } else { return fmt.Errorf("error could not reverse resolve address [%v]: %w", address, err) } @@ -475,7 +475,7 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC nameHash, err := go_ens.NameHash(name) if err != nil { - log.Warnf("error could not hash name [%v]: %v -> removing ens entry", name, err) + // log.Warnf("error could not hash name [%v]: %v -> removing ens entry", name, err) err = removeEnsName(name) if err != nil { return fmt.Errorf("error removing ens name [%v]: %w", name, err) @@ -488,12 +488,12 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC if err.Error() == "unregistered name" || err.Error() == "no address" || err.Error() == "no resolver" || - err.Error() == "abi: attempting to unmarshall an empty 
string while arguments are expected" || + err.Error() == "abi: attempting to unmarshal an empty string while arguments are expected" || strings.Contains(err.Error(), "execution reverted") || err.Error() == "invalid jump destination" || err.Error() == "invalid opcode: INVALID" { // the given name is not available anymore or resolving it did not work properly => we can remove it from the db (if it is there) - log.Warnf("could not resolve name [%v]: %v -> removing ens entry", name, err) + // log.Warnf("could not resolve name [%v]: %v -> removing ens entry", name, err) err = removeEnsName(name) if err != nil { return fmt.Errorf("error removing ens name after resolve failed [%v]: %w", name, err) @@ -516,7 +516,7 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC reverseName, err := go_ens.ReverseResolve(client, addr) if err != nil { if err.Error() == "not a resolver" || err.Error() == "no resolution" || err.Error() == "execution reverted" { - log.Warnf("reverse resolving address [%v] for name [%v] resulted in an error [%s], marking entry as not primary", addr, name, err.Error()) + // log.Warnf("reverse resolving address [%v] for name [%v] resulted in an error [%s], marking entry as not primary", addr, name, err.Error()) } else { return fmt.Errorf("error could not reverse resolve address [%v]: %w", addr, err) } @@ -549,12 +549,12 @@ func validateEnsName(client *ethclient.Client, name string, alreadyChecked *EnsC return fmt.Errorf("error writing ens data for name [%v]: %w", name, err) } - log.InfoWithFields(log.Fields{ - "name": name, - "address": addr, - "expires": expires, - "reverseName": reverseName, - }, "validated ens name") + // log.InfoWithFields(log.Fields{ + // "name": name, + // "address": addr, + // "expires": expires, + // "reverseName": reverseName, + // }, "validated ens name") return nil } From 685d019444c8b3f813c8d12b17f3e090374df9a9 Mon Sep 17 00:00:00 2001 From: Patrick Date: Wed, 18 Sep 2024 14:13:52 +0200 Subject: 
[PATCH 136/187] fix(api): fix GetValidatorDashboardElDeposits (#866) BEDS-155 --- backend/pkg/api/data_access/vdb_deposits.go | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/pkg/api/data_access/vdb_deposits.go b/backend/pkg/api/data_access/vdb_deposits.go index 8c1d49003..db2026a23 100644 --- a/backend/pkg/api/data_access/vdb_deposits.go +++ b/backend/pkg/api/data_access/vdb_deposits.go @@ -134,6 +134,7 @@ func (d *DataAccessService) GetValidatorDashboardElDeposits(ctx context.Context, WithdrawalCredential: t.Hash(hexutil.Encode(row.WithdrawalCredentials)), Amount: utils.GWeiToWei(big.NewInt(row.Amount)), Valid: row.Valid, + From: t.Address{Hash: t.Hash(hexutil.Encode(row.From))}, } addressMapping[hexutil.Encode(row.From)] = nil fromContractStatusRequests[i] = db.ContractInteractionAtRequest{ From b261d85f4b704e62e476de709df193911be3d5d2 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 09:18:43 +0200 Subject: [PATCH 137/187] (BEDS-480) add vdb mobile widget endpoint (#869) --- backend/pkg/api/data_access/dummy.go | 8 +++-- backend/pkg/api/data_access/vdb.go | 2 ++ backend/pkg/api/data_access/vdb_management.go | 6 ++++ backend/pkg/api/handlers/common.go | 13 +++++--- backend/pkg/api/handlers/internal.go | 33 +++++++++++++++++++ backend/pkg/api/handlers/public.go | 10 +++++- backend/pkg/api/router.go | 1 + backend/pkg/api/types/common.go | 8 +++++ backend/pkg/api/types/dashboard.go | 16 ++++----- backend/pkg/api/types/mobile.go | 15 +++++++++ backend/pkg/api/types/validator_dashboard.go | 9 +---- frontend/types/api/common.ts | 7 ++++ frontend/types/api/mobile.ts | 13 +++++++- frontend/types/api/validator_dashboard.ts | 15 ++------- 14 files changed, 117 insertions(+), 39 deletions(-) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 7aef7bc48..7bee9a38c 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -637,9 
+637,7 @@ func (d *DummyService) GetRocketPoolOverview(ctx context.Context) (*t.RocketPool } func (d *DummyService) GetApiWeights(ctx context.Context) ([]t.ApiWeightItem, error) { - r := []t.ApiWeightItem{} - err := commonFakeData(&r) - return r, err + return getDummyData[[]t.ApiWeightItem]() } func (d *DummyService) GetHealthz(ctx context.Context, showAll bool) t.HealthzData { @@ -654,3 +652,7 @@ func (d *DummyService) GetLatestBundleForNativeVersion(ctx context.Context, nati func (d *DummyService) IncrementBundleDeliveryCount(ctx context.Context, bundleVerison uint64) error { return nil } + +func (d *DummyService) GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) { + return getDummyStruct[t.MobileWidgetData]() +} diff --git a/backend/pkg/api/data_access/vdb.go b/backend/pkg/api/data_access/vdb.go index 1d38e5537..e7498702f 100644 --- a/backend/pkg/api/data_access/vdb.go +++ b/backend/pkg/api/data_access/vdb.go @@ -79,4 +79,6 @@ type ValidatorDashboardRepository interface { GetValidatorDashboardTotalRocketPool(ctx context.Context, dashboardId t.VDBId, search string) (*t.VDBRocketPoolTableRow, error) GetValidatorDashboardNodeRocketPool(ctx context.Context, dashboardId t.VDBId, node string) (*t.VDBNodeRocketPoolData, error) GetValidatorDashboardRocketPoolMinipools(ctx context.Context, dashboardId t.VDBId, node, cursor string, colSort t.Sort[enums.VDBRocketPoolMinipoolsColumn], search string, limit uint64) ([]t.VDBRocketPoolMinipoolsTableRow, *t.Paging, error) + + GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) } diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go index 3bd217507..d415f0a23 100644 --- a/backend/pkg/api/data_access/vdb_management.go +++ b/backend/pkg/api/data_access/vdb_management.go @@ -1213,3 +1213,9 @@ func (d *DataAccessService) GetValidatorDashboardPublicIdCount(ctx 
context.Conte `, dashboardId) return count, err } + +func (d *DataAccessService) GetValidatorDashboardMobileWidget(ctx context.Context, dashboardId t.VDBIdPrimary) (*t.MobileWidgetData, error) { + // TODO @Data-Access: Implement this function + // feel free to move this func to another file if needed + return d.dummy.GetValidatorDashboardMobileWidget(ctx, dashboardId) +} diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index ca585724b..a2c68cf24 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -78,6 +78,7 @@ const ( sortOrderAscending = "asc" sortOrderDescending = "desc" defaultSortOrder = sortOrderAscending + defaultDesc = defaultSortOrder == sortOrderDescending ethereum = "ethereum" gnosis = "gnosis" allowEmpty = true @@ -544,7 +545,7 @@ func checkEnumIsAllowed[T enums.EnumFactory[T]](v *validationError, enum T, allo func (v *validationError) parseSortOrder(order string) bool { switch order { case "": - return defaultSortOrder == sortOrderDescending + return defaultDesc case sortOrderAscending: return false case sortOrderDescending: @@ -558,19 +559,21 @@ func (v *validationError) parseSortOrder(order string) bool { func checkSort[T enums.EnumFactory[T]](v *validationError, sortString string) *types.Sort[T] { var c T if sortString == "" { - return &types.Sort[T]{Column: c, Desc: false} + return &types.Sort[T]{Column: c, Desc: defaultDesc} } sortSplit := strings.Split(sortString, ":") if len(sortSplit) > 2 { v.add("sort", fmt.Sprintf("given value '%s' for parameter 'sort' is not valid, expected format is '[:(asc|desc)]'", sortString)) return nil } + var desc bool if len(sortSplit) == 1 { - sortSplit = append(sortSplit, ":asc") + desc = defaultDesc + } else { + desc = v.parseSortOrder(sortSplit[1]) } sortCol := checkEnum[T](v, sortSplit[0], "sort") - order := v.parseSortOrder(sortSplit[1]) - return &types.Sort[T]{Column: sortCol, Desc: 
desc} } func (v *validationError) checkProtocolModes(protocolModes string) types.VDBProtocolModes { diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go index 0a0f5c006..78d998856 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -475,6 +475,39 @@ func (h *HandlerService) InternalGetValidatorDashboardRocketPoolMinipools(w http h.PublicGetValidatorDashboardRocketPoolMinipools(w, r) } +// even though this endpoint is internal only, it should still not be broken since it is used by the mobile app +func (h *HandlerService) InternalGetValidatorDashboardMobileWidget(w http.ResponseWriter, r *http.Request) { + var v validationError + dashboardId := v.checkPrimaryDashboardId(mux.Vars(r)["dashboard_id"]) + if v.hasErrors() { + handleErr(w, r, v) + return + } + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + userInfo, err := h.dai.GetUserInfo(r.Context(), userId) + if err != nil { + handleErr(w, r, err) + return + } + if userInfo.UserGroup != types.UserGroupAdmin && !userInfo.PremiumPerks.MobileAppWidget { + returnForbidden(w, r, errors.New("user does not have access to mobile app widget")) + return + } + data, err := h.dai.GetValidatorDashboardMobileWidget(r.Context(), dashboardId) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalGetValidatorDashboardMobileWidgetResponse{ + Data: *data, + } + returnOk(w, r, response) +} + // -------------------------------------- // Mobile diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index b3ffd15dd..e0e560b8c 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -61,6 +61,14 @@ func (h *HandlerService) PublicGetHealthzLoadbalancer(w http.ResponseWriter, r * returnOk(w, r, nil) } +// PublicGetUserDashboards godoc +// +// @Description Get all dashboards of the authenticated user. 
+// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Dashboards +// @Produce json +// @Success 200 {object} types.ApiDataResponse[types.UserDashboardsData] +// @Router /users/me/dashboards [get] func (h *HandlerService) PublicGetUserDashboards(w http.ResponseWriter, r *http.Request) { userId, err := GetUserIdByContext(r) if err != nil { @@ -859,7 +867,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW // @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "request" // @Success 200 {object} types.ApiDataResponse[types.VDBPostArchivingReturnData] // @Failure 400 {object} types.ApiErrorResponse -// @Conflict 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." +// @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." 
// @Router /validator-dashboards/{dashboard_id}/archiving [put] func (h *HandlerService) PublicPutValidatorDashboardArchiving(w http.ResponseWriter, r *http.Request) { var v validationError diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 3238257bd..16ab4c0b2 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -304,6 +304,7 @@ func addValidatorDashboardRoutes(hs *handlers.HandlerService, publicRouter, inte {http.MethodGet, "/{dashboard_id}/total-rocket-pool", hs.PublicGetValidatorDashboardTotalRocketPool, hs.InternalGetValidatorDashboardTotalRocketPool}, {http.MethodGet, "/{dashboard_id}/rocket-pool/{node_address}", hs.PublicGetValidatorDashboardNodeRocketPool, hs.InternalGetValidatorDashboardNodeRocketPool}, {http.MethodGet, "/{dashboard_id}/rocket-pool/{node_address}/minipools", hs.PublicGetValidatorDashboardRocketPoolMinipools, hs.InternalGetValidatorDashboardRocketPoolMinipools}, + {http.MethodGet, "/{dashboard_id}/mobile-widget", nil, hs.InternalGetValidatorDashboardMobileWidget}, } addEndpointsToRouters(endpoints, publicDashboardRouter, internalDashboardRouter) } diff --git a/backend/pkg/api/types/common.go b/backend/pkg/api/types/common.go index 2deebb1b9..6f2856768 100644 --- a/backend/pkg/api/types/common.go +++ b/backend/pkg/api/types/common.go @@ -145,3 +145,11 @@ type IndexBlocks struct { Index uint64 `json:"index"` Blocks []uint64 `json:"blocks"` } + +type ValidatorStateCounts struct { + Online uint64 `json:"online"` + Offline uint64 `json:"offline"` + Pending uint64 `json:"pending"` + Exited uint64 `json:"exited"` + Slashed uint64 `json:"slashed"` +} diff --git a/backend/pkg/api/types/dashboard.go b/backend/pkg/api/types/dashboard.go index d3e334722..340a21b01 100644 --- a/backend/pkg/api/types/dashboard.go +++ b/backend/pkg/api/types/dashboard.go @@ -5,14 +5,14 @@ type AccountDashboard struct { Name string `json:"name"` } type ValidatorDashboard struct { - Id uint64 `json:"id"` - Name string `json:"name"` 
- Network uint64 `json:"network"` - PublicIds []VDBPublicId `json:"public_ids,omitempty"` - IsArchived bool `json:"is_archived"` - ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'"` - ValidatorCount uint64 `json:"validator_count"` - GroupCount uint64 `json:"group_count"` + Id uint64 `json:"id" extensions:"x-order=1"` + Name string `json:"name" extensions:"x-order=2"` + Network uint64 `json:"network" extensions:"x-order=3"` + PublicIds []VDBPublicId `json:"public_ids,omitempty" extensions:"x-order=4"` + IsArchived bool `json:"is_archived" extensions:"x-order=5"` + ArchivedReason string `json:"archived_reason,omitempty" tstype:"'user' | 'dashboard_limit' | 'validator_limit' | 'group_limit'" extensions:"x-order=6"` + ValidatorCount uint64 `json:"validator_count" extensions:"x-order=7"` + GroupCount uint64 `json:"group_count" extensions:"x-order=8"` } type UserDashboardsData struct { diff --git a/backend/pkg/api/types/mobile.go b/backend/pkg/api/types/mobile.go index 7f84a999d..62c323b0d 100644 --- a/backend/pkg/api/types/mobile.go +++ b/backend/pkg/api/types/mobile.go @@ -1,8 +1,23 @@ package types +import "github.com/shopspring/decimal" + type MobileBundleData struct { BundleUrl string `json:"bundle_url,omitempty"` HasNativeUpdateAvailable bool `json:"has_native_update_available"` } type GetMobileLatestBundleResponse ApiDataResponse[MobileBundleData] + +type MobileWidgetData struct { + ValidatorStateCounts ValidatorStateCounts `json:"validator_state_counts"` + Last24hIncome decimal.Decimal `json:"last_24h_income" faker:"eth"` + Last7dIncome decimal.Decimal `json:"last_7d_income" faker:"eth"` + Last30dApr float64 `json:"last_30d_apr"` + Last30dEfficiency decimal.Decimal `json:"last_30d_efficiency" faker:"eth"` + NetworkEfficiency float64 `json:"network_efficiency"` + RplPrice decimal.Decimal `json:"rpl_price" faker:"eth"` + RplApr float64 `json:"rpl_apr"` +} + +type 
InternalGetValidatorDashboardMobileWidgetResponse ApiDataResponse[MobileWidgetData] diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go index 380d035d4..f5df0b039 100644 --- a/backend/pkg/api/types/validator_dashboard.go +++ b/backend/pkg/api/types/validator_dashboard.go @@ -6,13 +6,6 @@ import ( // ------------------------------------------------------------ // Overview -type VDBOverviewValidators struct { - Online uint64 `json:"online"` - Offline uint64 `json:"offline"` - Pending uint64 `json:"pending"` - Exited uint64 `json:"exited"` - Slashed uint64 `json:"slashed"` -} type VDBOverviewGroup struct { Id uint64 `json:"id"` @@ -30,7 +23,7 @@ type VDBOverviewData struct { Name string `json:"name,omitempty" extensions:"x-order=1"` Network uint64 `json:"network"` Groups []VDBOverviewGroup `json:"groups"` - Validators VDBOverviewValidators `json:"validators"` + Validators ValidatorStateCounts `json:"validators"` Efficiency PeriodicValues[float64] `json:"efficiency"` Rewards PeriodicValues[ClElValue[decimal.Decimal]] `json:"rewards"` Apr PeriodicValues[ClElValue[float64]] `json:"apr"` diff --git a/frontend/types/api/common.ts b/frontend/types/api/common.ts index 3c8c9506a..21cd77662 100644 --- a/frontend/types/api/common.ts +++ b/frontend/types/api/common.ts @@ -117,3 +117,10 @@ export interface IndexBlocks { index: number /* uint64 */; blocks: number /* uint64 */[]; } +export interface ValidatorStateCounts { + online: number /* uint64 */; + offline: number /* uint64 */; + pending: number /* uint64 */; + exited: number /* uint64 */; + slashed: number /* uint64 */; +} diff --git a/frontend/types/api/mobile.ts b/frontend/types/api/mobile.ts index d6b234a18..335bd2aef 100644 --- a/frontend/types/api/mobile.ts +++ b/frontend/types/api/mobile.ts @@ -1,6 +1,6 @@ // Code generated by tygo. DO NOT EDIT. 
/* eslint-disable */ -import type { ApiDataResponse } from './common' +import type { ApiDataResponse, ValidatorStateCounts } from './common' ////////// // source: mobile.go @@ -10,3 +10,14 @@ export interface MobileBundleData { has_native_update_available: boolean; } export type GetMobileLatestBundleResponse = ApiDataResponse; +export interface MobileWidgetData { + validator_state_counts: ValidatorStateCounts; + last_24h_income: string /* decimal.Decimal */; + last_7d_income: string /* decimal.Decimal */; + last_30d_apr: number /* float64 */; + last_30d_efficiency: string /* decimal.Decimal */; + network_efficiency: number /* float64 */; + rpl_price: string /* decimal.Decimal */; + rpl_apr: number /* float64 */; +} +export type InternalGetValidatorDashboardMobileWidgetResponse = ApiDataResponse; diff --git a/frontend/types/api/validator_dashboard.ts b/frontend/types/api/validator_dashboard.ts index 03f07f548..58bc40cce 100644 --- a/frontend/types/api/validator_dashboard.ts +++ b/frontend/types/api/validator_dashboard.ts @@ -1,21 +1,10 @@ // Code generated by tygo. DO NOT EDIT. 
/* eslint-disable */ -import type { PeriodicValues, ClElValue, ChartHistorySeconds, ApiDataResponse, StatusCount, ApiPagingResponse, Luck, ChartData, ValidatorHistoryDuties, Address, PubKey, Hash, PercentageDetails } from './common' +import type { ValidatorStateCounts, PeriodicValues, ClElValue, ChartHistorySeconds, ApiDataResponse, StatusCount, ApiPagingResponse, Luck, ChartData, ValidatorHistoryDuties, Address, PubKey, Hash, PercentageDetails } from './common' ////////// // source: validator_dashboard.go -/** - * ------------------------------------------------------------ - * Overview - */ -export interface VDBOverviewValidators { - online: number /* uint64 */; - offline: number /* uint64 */; - pending: number /* uint64 */; - exited: number /* uint64 */; - slashed: number /* uint64 */; -} export interface VDBOverviewGroup { id: number /* uint64 */; name: string; @@ -30,7 +19,7 @@ export interface VDBOverviewData { name?: string; network: number /* uint64 */; groups: VDBOverviewGroup[]; - validators: VDBOverviewValidators; + validators: ValidatorStateCounts; efficiency: PeriodicValues; rewards: PeriodicValues>; apr: PeriodicValues>; From 62d7b22b3c121f550745d2eb2e3ab83ee8d21bfd Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:48:58 +0200 Subject: [PATCH 138/187] (BEDS-501) add enabled flag to all thresholds (#870) --- backend/pkg/api/handlers/common.go | 7 ++--- backend/pkg/api/types/notifications.go | 31 ++++++++++++------- .../playground/PlaygroundDialog.vue | 1 + frontend/types/api/notifications.ts | 29 +++++++++++------ frontend/utils/mock.ts | 5 +++ 5 files changed, 47 insertions(+), 26 deletions(-) diff --git a/backend/pkg/api/handlers/common.go b/backend/pkg/api/handlers/common.go index a2c68cf24..ca7c41bb1 100644 --- a/backend/pkg/api/handlers/common.go +++ b/backend/pkg/api/handlers/common.go @@ -2,6 +2,7 @@ package handlers import ( "bytes" + "cmp" "context" "encoding/base64" "encoding/json" 
@@ -477,11 +478,7 @@ func (v *validationError) checkValidatorDashboardPublicId(publicId string) types return types.VDBIdPublic(v.checkRegex(reValidatorDashboardPublicId, publicId, "public_dashboard_id")) } -type number interface { - uint64 | int64 | float64 -} - -func checkMinMax[T number](v *validationError, param T, min T, max T, paramName string) T { +func checkMinMax[T cmp.Ordered](v *validationError, param T, min T, max T, paramName string) T { if param < min { v.add(paramName, fmt.Sprintf("given value '%v' is too small, minimum value is %v", param, min)) } diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index f183988b1..27bf585be 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -141,9 +141,12 @@ type InternalGetUserNotificationNetworksResponse ApiPagingResponse[NotificationN // ------------------------------------------------------------ // Notification Settings type NotificationSettingsNetwork struct { - GasAboveThreshold decimal.Decimal `json:"gas_above_threshold" faker:"eth"` // 0 is disabled - GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"eth"` // 0 is disabled - ParticipationRateThreshold float64 `json:"participation_rate_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled + IsGasAboveSubscribed bool `json:"is_gas_above_subscribed"` + GasAboveThreshold decimal.Decimal `json:"gas_above_threshold" faker:"eth"` + IsGasBelowSubscribed bool `json:"is_gas_below_subscribed"` + GasBelowThreshold decimal.Decimal `json:"gas_below_threshold" faker:"eth"` + IsParticipationRateSubscribed bool `json:"is_participation_rate_subscribed"` + ParticipationRateThreshold float64 `json:"participation_rate_threshold" faker:"boundary_start=0, boundary_end=1"` } type NotificationNetwork struct { ChainId uint64 `json:"chain_id"` @@ -164,15 +167,20 @@ type NotificationSettingsGeneral struct { IsEmailNotificationsEnabled bool 
`json:"is_email_notifications_enabled"` IsPushNotificationsEnabled bool `json:"is_push_notifications_enabled"` - IsMachineOfflineSubscribed bool `json:"is_machine_offline_subscribed"` - MachineStorageUsageThreshold float64 `json:"machine_storage_usage_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - MachineCpuUsageThreshold float64 `json:"machine_cpu_usage_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - MachineMemoryUsageThreshold float64 `json:"machine_memory_usage_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled + IsMachineOfflineSubscribed bool `json:"is_machine_offline_subscribed"` + IsMachineStorageUsageSubscribed bool `json:"is_machine_storage_usage_subscribed"` + MachineStorageUsageThreshold float64 `json:"machine_storage_usage_threshold" faker:"boundary_start=0, boundary_end=1"` + IsMachineCpuUsageSubscribed bool `json:"is_machine_cpu_usage_subscribed"` + MachineCpuUsageThreshold float64 `json:"machine_cpu_usage_threshold" faker:"boundary_start=0, boundary_end=1"` + IsMachineMemoryUsageSubscribed bool `json:"is_machine_memory_usage_subscribed"` + MachineMemoryUsageThreshold float64 `json:"machine_memory_usage_threshold" faker:"boundary_start=0, boundary_end=1"` SubscribedClients []string `json:"subscribed_clients"` IsRocketPoolNewRewardRoundSubscribed bool `json:"is_rocket_pool_new_reward_round_subscribed"` - RocketPoolMaxCollateralThreshold float64 `json:"rocket_pool_max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled - RocketPoolMinCollateralThreshold float64 `json:"rocket_pool_min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 means disabled + IsRocketPoolMaxCollateralSubscribed bool `json:"is_rocket_pool_max_collateral_subscribed"` + RocketPoolMaxCollateralThreshold float64 `json:"rocket_pool_max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` + IsRocketPoolMinCollateralSubscribed bool 
`json:"is_rocket_pool_min_collateral_subscribed"` + RocketPoolMinCollateralThreshold float64 `json:"rocket_pool_min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` } type InternalPutUserNotificationSettingsGeneralResponse ApiDataResponse[NotificationSettingsGeneral] type NotificationSettings struct { @@ -188,7 +196,8 @@ type NotificationSettingsValidatorDashboard struct { IsRealTimeModeEnabled bool `json:"is_real_time_mode_enabled"` IsValidatorOfflineSubscribed bool `json:"is_validator_offline_subscribed"` - GroupOfflineThreshold float64 `json:"group_offline_threshold" faker:"boundary_start=0, boundary_end=1"` // 0 is disabled + IsGroupOfflineSubscribed bool `json:"is_group_offline_subscribed"` + GroupOfflineThreshold float64 `json:"group_offline_threshold" faker:"boundary_start=0, boundary_end=1"` IsAttestationsMissedSubscribed bool `json:"is_attestations_missed_subscribed"` IsBlockProposalSubscribed bool `json:"is_block_proposal_subscribed"` IsUpcomingBlockProposalSubscribed bool `json:"is_upcoming_block_proposal_subscribed"` @@ -208,7 +217,7 @@ type NotificationSettingsAccountDashboard struct { IsIncomingTransactionsSubscribed bool `json:"is_incoming_transactions_subscribed"` IsOutgoingTransactionsSubscribed bool `json:"is_outgoing_transactions_subscribed"` IsERC20TokenTransfersSubscribed bool `json:"is_erc20_token_transfers_subscribed"` - ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold" faker:"boundary_start=0, boundary_end=1000000"` // 0 does not disable, is_erc20_token_transfers_subscribed determines if it's enabled + ERC20TokenTransfersValueThreshold float64 `json:"erc20_token_transfers_value_threshold" faker:"boundary_start=0, boundary_end=1000000"` IsERC721TokenTransfersSubscribed bool `json:"is_erc721_token_transfers_subscribed"` IsERC1155TokenTransfersSubscribed bool `json:"is_erc1155_token_transfers_subscribed"` } diff --git a/frontend/components/playground/PlaygroundDialog.vue 
b/frontend/components/playground/PlaygroundDialog.vue index 72c8d4082..b9d2c3c35 100644 --- a/frontend/components/playground/PlaygroundDialog.vue +++ b/frontend/components/playground/PlaygroundDialog.vue @@ -34,6 +34,7 @@ const validatorSub: NotificationSettingsValidatorDashboard = { group_offline_threshold: 0, // means "deactivated/unchecked" is_attestations_missed_subscribed: true, is_block_proposal_subscribed: true, + is_group_offline_subscribed: true, is_real_time_mode_enabled: false, is_slashed_subscribed: false, is_sync_subscribed: true, diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index e22be1d6a..0d9f53898 100644 --- a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -137,9 +137,12 @@ export type InternalGetUserNotificationNetworksResponse = ApiPagingResponse; export interface NotificationSettings { @@ -178,7 +186,8 @@ export interface NotificationSettingsValidatorDashboard { is_webhook_discord_enabled: boolean; is_real_time_mode_enabled: boolean; is_validator_offline_subscribed: boolean; - group_offline_threshold: number /* float64 */; // 0 is disabled + is_group_offline_subscribed: boolean; + group_offline_threshold: number /* float64 */; is_attestations_missed_subscribed: boolean; is_block_proposal_subscribed: boolean; is_upcoming_block_proposal_subscribed: boolean; @@ -195,7 +204,7 @@ export interface NotificationSettingsAccountDashboard { is_incoming_transactions_subscribed: boolean; is_outgoing_transactions_subscribed: boolean; is_erc20_token_transfers_subscribed: boolean; - erc20_token_transfers_value_threshold: number /* float64 */; // 0 does not disable, is_erc20_token_transfers_subscribed determines if it's enabled + erc20_token_transfers_value_threshold: number /* float64 */; is_erc721_token_transfers_subscribed: boolean; is_erc1155_token_transfers_subscribed: boolean; } diff --git a/frontend/utils/mock.ts b/frontend/utils/mock.ts index e1c25fef2..f942d8fbb 100644 --- 
a/frontend/utils/mock.ts +++ b/frontend/utils/mock.ts @@ -446,8 +446,13 @@ export function mockManageNotificationsGeneral(): InternalGetUserNotificationSet general_settings: { do_not_disturb_timestamp: 9000, is_email_notifications_enabled: false, + is_machine_cpu_usage_subscribed: true, + is_machine_memory_usage_subscribed: true, is_machine_offline_subscribed: true, + is_machine_storage_usage_subscribed: true, is_push_notifications_enabled: true, + is_rocket_pool_max_collateral_subscribed: true, + is_rocket_pool_min_collateral_subscribed: true, is_rocket_pool_new_reward_round_subscribed: true, machine_cpu_usage_threshold: 40, machine_memory_usage_threshold: 50, From f9aecaf7bdd9f5cd63f20c6bf096a92bac6326bd Mon Sep 17 00:00:00 2001 From: LUCCA DUKIC <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:58:52 +0200 Subject: [PATCH 139/187] (BEDS-452) move clients in notification settings --- backend/pkg/api/types/notifications.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 27bf585be..5d80efc89 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -162,6 +162,12 @@ type NotificationPairedDevice struct { } type InternalPutUserNotificationSettingsPairedDevicesResponse ApiDataResponse[NotificationPairedDevice] +type NotificationSettingsClients struct { + Id uint64 `json:"id"` + Name string `json:"name"` + Category string `json:"category"` + IsSubscribed bool `json:"is_subscribed"` +} type NotificationSettingsGeneral struct { DoNotDisturbTimestamp int64 `json:"do_not_disturb_timestamp"` // notifications are disabled until this timestamp IsEmailNotificationsEnabled bool `json:"is_email_notifications_enabled"` @@ -175,18 +181,18 @@ type NotificationSettingsGeneral struct { IsMachineMemoryUsageSubscribed bool `json:"is_machine_memory_usage_subscribed"` 
MachineMemoryUsageThreshold float64 `json:"machine_memory_usage_threshold" faker:"boundary_start=0, boundary_end=1"` - SubscribedClients []string `json:"subscribed_clients"` - IsRocketPoolNewRewardRoundSubscribed bool `json:"is_rocket_pool_new_reward_round_subscribed"` - IsRocketPoolMaxCollateralSubscribed bool `json:"is_rocket_pool_max_collateral_subscribed"` - RocketPoolMaxCollateralThreshold float64 `json:"rocket_pool_max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` - IsRocketPoolMinCollateralSubscribed bool `json:"is_rocket_pool_min_collateral_subscribed"` - RocketPoolMinCollateralThreshold float64 `json:"rocket_pool_min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` + IsRocketPoolNewRewardRoundSubscribed bool `json:"is_rocket_pool_new_reward_round_subscribed"` + IsRocketPoolMaxCollateralSubscribed bool `json:"is_rocket_pool_max_collateral_subscribed"` + RocketPoolMaxCollateralThreshold float64 `json:"rocket_pool_max_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` + IsRocketPoolMinCollateralSubscribed bool `json:"is_rocket_pool_min_collateral_subscribed"` + RocketPoolMinCollateralThreshold float64 `json:"rocket_pool_min_collateral_threshold" faker:"boundary_start=0, boundary_end=1"` } type InternalPutUserNotificationSettingsGeneralResponse ApiDataResponse[NotificationSettingsGeneral] type NotificationSettings struct { - GeneralSettings NotificationSettingsGeneral `json:"general_settings"` - Networks []NotificationNetwork `json:"networks"` - PairedDevices []NotificationPairedDevice `json:"paired_devices"` + GeneralSettings NotificationSettingsGeneral `json:"general_settings"` + Networks []NotificationNetwork `json:"networks"` + PairedDevices []NotificationPairedDevice `json:"paired_devices"` + Clients []NotificationSettingsClients `json:"clients"` } type InternalGetUserNotificationSettingsResponse ApiDataResponse[NotificationSettings] From d0a011c6dc740ce3673464078e94cfc3afedb0dc Mon Sep 17 00:00:00 2001 From: 
LUCCA DUKIC <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:29:45 +0200 Subject: [PATCH 140/187] (BEDS-452) add new client notification settings endpoint --- backend/pkg/api/data_access/dummy.go | 5 + backend/pkg/api/data_access/notifications.go | 4 + backend/pkg/api/handlers/internal.go | 4 + backend/pkg/api/handlers/public.go | 122 +++++++++++++------ backend/pkg/api/router.go | 1 + backend/pkg/api/types/notifications.go | 13 +- 6 files changed, 104 insertions(+), 45 deletions(-) diff --git a/backend/pkg/api/data_access/dummy.go b/backend/pkg/api/data_access/dummy.go index 7bee9a38c..38fb693ab 100644 --- a/backend/pkg/api/data_access/dummy.go +++ b/backend/pkg/api/data_access/dummy.go @@ -488,6 +488,11 @@ func (d *DummyService) UpdateNotificationSettingsPairedDevice(ctx context.Contex func (d *DummyService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { return nil } + +func (d *DummyService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + return getDummyStruct[t.NotificationSettingsClient]() +} + func (d *DummyService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { r, p, err := getDummyWithPaging[t.NotificationSettingsDashboardsTableRow]() for i, n := range r { diff --git a/backend/pkg/api/data_access/notifications.go b/backend/pkg/api/data_access/notifications.go index 5389847db..44f7ca4a5 100644 --- a/backend/pkg/api/data_access/notifications.go +++ b/backend/pkg/api/data_access/notifications.go @@ -25,6 +25,7 @@ type NotificationsRepository interface { UpdateNotificationSettingsNetworks(ctx context.Context, userId uint64, chainId uint64, settings t.NotificationSettingsNetwork) error 
UpdateNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string, name string, IsNotificationsEnabled bool) error DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error + UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) UpdateNotificationSettingsValidatorDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsValidatorDashboard) error UpdateNotificationSettingsAccountDashboard(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, settings t.NotificationSettingsAccountDashboard) error @@ -72,6 +73,9 @@ func (d *DataAccessService) UpdateNotificationSettingsPairedDevice(ctx context.C func (d *DataAccessService) DeleteNotificationSettingsPairedDevice(ctx context.Context, userId uint64, pairedDeviceId string) error { return d.dummy.DeleteNotificationSettingsPairedDevice(ctx, userId, pairedDeviceId) } +func (d *DataAccessService) UpdateNotificationSettingsClients(ctx context.Context, userId uint64, clientId uint64, IsSubscribed bool) (*t.NotificationSettingsClient, error) { + return d.dummy.UpdateNotificationSettingsClients(ctx, userId, clientId, IsSubscribed) +} func (d *DataAccessService) GetNotificationSettingsDashboards(ctx context.Context, userId uint64, cursor string, colSort t.Sort[enums.NotificationSettingsDashboardColumn], search string, limit uint64) ([]t.NotificationSettingsDashboardsTableRow, *t.Paging, error) { return d.dummy.GetNotificationSettingsDashboards(ctx, userId, cursor, colSort, search, limit) } diff --git a/backend/pkg/api/handlers/internal.go b/backend/pkg/api/handlers/internal.go 
index 78d998856..bf3e347fe 100644 --- a/backend/pkg/api/handlers/internal.go +++ b/backend/pkg/api/handlers/internal.go @@ -609,6 +609,10 @@ func (h *HandlerService) InternalDeleteUserNotificationSettingsPairedDevices(w h h.PublicDeleteUserNotificationSettingsPairedDevices(w, r) } +func (h *HandlerService) InternalPutUserNotificationSettingsClient(w http.ResponseWriter, r *http.Request) { + h.PublicPutUserNotificationSettingsClient(w, r) +} + func (h *HandlerService) InternalGetUserNotificationSettingsDashboards(w http.ResponseWriter, r *http.Request) { h.PublicGetUserNotificationSettingsDashboards(w, r) } diff --git a/backend/pkg/api/handlers/public.go b/backend/pkg/api/handlers/public.go index e0e560b8c..d160582df 100644 --- a/backend/pkg/api/handlers/public.go +++ b/backend/pkg/api/handlers/public.go @@ -271,7 +271,7 @@ func (h *HandlerService) PublicGetValidatorDashboard(w http.ResponseWriter, r *h // @Security ApiKeyInHeader || ApiKeyInQuery // @Tags Validator Dashboard Management // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Success 204 "Dashboard deleted successfully." // @Failure 400 {object} types.ApiErrorResponse "Bad Request" // @Router /validator-dashboards/{dashboard_id} [delete] @@ -297,7 +297,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboard(w http.ResponseWriter, r // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." 
// @Param request body handlers.PublicPutValidatorDashboardName.request true "request" // @Success 200 {object} types.ApiDataResponse[types.VDBPostReturnData] // @Failure 400 {object} types.ApiErrorResponse @@ -336,7 +336,7 @@ func (h *HandlerService) PublicPutValidatorDashboardName(w http.ResponseWriter, // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicPostValidatorDashboardGroups.request true "request" // @Success 201 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] // @Failure 400 {object} types.ApiErrorResponse @@ -400,8 +400,8 @@ func (h *HandlerService) PublicPostValidatorDashboardGroups(w http.ResponseWrite // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." // @Param request body handlers.PublicPutValidatorDashboardGroups.request true "request" // @Success 200 {object} types.ApiDataResponse[types.VDBPostCreateGroupData] // @Failure 400 {object} types.ApiErrorResponse @@ -453,8 +453,8 @@ func (h *HandlerService) PublicPutValidatorDashboardGroups(w http.ResponseWriter // @Security ApiKeyInHeader || ApiKeyInQuery // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param group_id path integer true "The ID of the group." // @Success 204 "Group deleted successfully." 
// @Failure 400 {object} types.ApiErrorResponse // @Router /validator-dashboards/{dashboard_id}/groups/{group_id} [delete] @@ -496,7 +496,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardGroup(w http.ResponseWrit // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicPostValidatorDashboardValidators.request true "`group_id`: (optional) Provide a single group id, to which all validators get added to. If omitted, the default group will be used.

To add validators, only one of the following fields can be set:
  • `validators`: Provide a list of validator indices or public keys to add to the dashboard.
  • `deposit_address`: (limited to subscription tiers with 'Bulk adding') Provide a deposit address from which as many validators as possible will be added to the dashboard.
  • `withdrawal_address`: (limited to subscription tiers with 'Bulk adding') Provide a withdrawal address from which as many validators as possible will be added to the dashboard.
  • `graffiti`: (limited to subscription tiers with 'Bulk adding') Provide a graffiti string from which as many validators as possible will be added to the dashboard.
" // @Success 201 {object} types.ApiDataResponse[[]types.VDBPostValidatorsData] "Returns a list of added validators." // @Failure 400 {object} types.ApiErrorResponse @@ -631,7 +631,7 @@ func (h *HandlerService) PublicPostValidatorDashboardValidators(w http.ResponseW // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id query string false "The ID of the group." +// @Param group_id query integer false "The ID of the group." // @Param limit query string false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(index, public_key, balance, status, withdrawal_credentials) // @Param search query string false "Search for Address, ENS." @@ -672,7 +672,7 @@ func (h *HandlerService) PublicGetValidatorDashboardValidators(w http.ResponseWr // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicDeleteValidatorDashboardValidators.request true "`validators`: Provide an array of validator indices or public keys that should get removed from the dashboard." // @Success 204 "Validators removed successfully." // @Failure 400 {object} types.ApiErrorResponse @@ -714,7 +714,7 @@ func (h *HandlerService) PublicDeleteValidatorDashboardValidators(w http.Respons // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param request body handlers.PublicPostValidatorDashboardPublicIds.request true "`name`: Provide a public name for the dashboard
`share_settings`:
  • `share_groups`: If set to `true`, accessing the dashboard through the public ID will not reveal any group information.
" // @Success 201 {object} types.ApiDataResponse[types.VDBPublicId] // @Failure 400 {object} types.ApiErrorResponse @@ -768,7 +768,7 @@ func (h *HandlerService) PublicPostValidatorDashboardPublicIds(w http.ResponseWr // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param public_id path string true "The ID of the public ID." // @Param request body handlers.PublicPutValidatorDashboardPublicId.request true "`name`: Provide a public name for the dashboard
`share_settings`:
  • `share_groups`: If set to `true`, accessing the dashboard through the public ID will not reveal any group information.
" // @Success 200 {object} types.ApiDataResponse[types.VDBPublicId] @@ -823,7 +823,7 @@ func (h *HandlerService) PublicPutValidatorDashboardPublicId(w http.ResponseWrit // @Security ApiKeyInHeader || ApiKeyInQuery // @Tags Validator Dashboard Management // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." +// @Param dashboard_id path integer true "The ID of the dashboard." // @Param public_id path string true "The ID of the public ID." // @Success 204 "Public ID deleted successfully." // @Failure 400 {object} types.ApiErrorResponse @@ -863,8 +863,8 @@ func (h *HandlerService) PublicDeleteValidatorDashboardPublicId(w http.ResponseW // @Tags Validator Dashboard Management // @Accept json // @Produce json -// @Param dashboard_id path string true "The ID of the dashboard." -// @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "request" +// @Param dashboard_id path integer true "The ID of the dashboard." +// @Param request body handlers.PublicPutValidatorDashboardArchiving.request true "`is_archived`: Set to `true` to archive the dashboard, or `false` to unarchive it." // @Success 200 {object} types.ApiDataResponse[types.VDBPostArchivingReturnData] // @Failure 400 {object} types.ApiErrorResponse // @Failure 409 {object} types.ApiErrorResponse "Conflict. The request could not be performed by the server because the authenticated user has already reached their subscription limit." @@ -1043,7 +1043,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummary(w http.ResponseWrite // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param group_id path integer true "The ID of the group." // @Param period query string true "Time period to get data for." 
Enums(all_time, last_30d, last_7d, last_24h, last_1h) // @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." // @Success 200 {object} types.GetValidatorDashboardGroupSummaryResponse @@ -1142,7 +1142,7 @@ func (h *HandlerService) PublicGetValidatorDashboardSummaryChart(w http.Response // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id query string false "The ID of the group." +// @Param group_id query integer false "The ID of the group." // @Param duty query string false "Validator duty to get data for." Enums(none, sync, slashed, proposal) Default(none) // @Param period query string true "Time period to get data for." Enums(all_time, last_30d, last_7d, last_24h, last_1h) // @Success 200 {object} types.GetValidatorDashboardSummaryValidatorsResponse @@ -1246,8 +1246,8 @@ func (h *HandlerService) PublicGetValidatorDashboardRewards(w http.ResponseWrite // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param epoch path string true "The epoch to get data for." +// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch to get data for." // @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." // @Success 200 {object} types.GetValidatorDashboardGroupRewardsResponse // @Failure 400 {object} types.ApiErrorResponse @@ -1322,8 +1322,8 @@ func (h *HandlerService) PublicGetValidatorDashboardRewardsChart(w http.Response // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." 
-// @Param epoch path string true "The epoch to get data for." -// @Param group_id query string false "The ID of the group." +// @Param epoch path integer true "The epoch to get data for." +// @Param group_id query integer false "The ID of the group." // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." // @Param limit query string false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(validator, reward) @@ -1460,8 +1460,8 @@ func (h *HandlerService) PublicGetValidatorDashboardHeatmap(w http.ResponseWrite // @Tags Validator Dashboard // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param timestamp path string true "The timestamp to get data for." +// @Param group_id path integer true "The ID of the group." +// @Param timestamp path integer true "The timestamp to get data for." // @Param modes query string false "Provide a comma separated list of protocol modes which should be respected for validator calculations. Possible values are `rocket_pool``." // @Param aggregation query string false "Aggregation type to get data for." Enums(epoch, hourly, daily, weekly) Default(hourly) // @Success 200 {object} types.GetValidatorDashboardGroupHeatmapResponse @@ -1903,7 +1903,7 @@ func (h *HandlerService) PublicGetUserNotifications(w http.ResponseWriter, r *ht // @Produce json // @Param network query string false "If set, results will be filtered to only include networks given. Provide a comma separated list." // @Param cursor query string false "Return data for the given cursor value. 
Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(chain_id, timestamp, dashboard_id) // @Param search query string false "Search for Dashboard, Group" // @Success 200 {object} types.InternalGetUserNotificationDashboardsResponse @@ -1931,7 +1931,7 @@ func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWrit } response := types.InternalGetUserNotificationDashboardsResponse{ Data: data, - Paging: *paging, + Paging: *paging, } returnOk(w, r, response) } @@ -1943,8 +1943,8 @@ func (h *HandlerService) PublicGetUserNotificationDashboards(w http.ResponseWrit // @Tags Notifications // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param epoch path string true "The epoch of the notification." +// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch of the notification." // @Success 200 {object} types.InternalGetUserNotificationsValidatorDashboardResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/validator-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] @@ -1976,8 +1976,8 @@ func (h *HandlerService) PublicGetUserNotificationsValidatorDashboard(w http.Res // @Tags Notifications // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." -// @Param epoch path string true "The epoch of the notification."
+// @Param group_id path integer true "The ID of the group." +// @Param epoch path integer true "The epoch of the notification." // @Success 200 {object} types.InternalGetUserNotificationsAccountDashboardResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/account-dashboards/{dashboard_id}/groups/{group_id}/epochs/{epoch} [get] @@ -2009,7 +2009,7 @@ func (h *HandlerService) PublicGetUserNotificationsAccountDashboard(w http.Respo // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(machine_name, threshold, event_type, timestamp) // @Param search query string false "Search for Machine" // @Success 200 {object} types.InternalGetUserNotificationMachinesResponse @@ -2048,7 +2048,7 @@ func (h *HandlerService) PublicGetUserNotificationMachines(w http.ResponseWriter // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(client_name, timestamp) // @Param search query string false "Search for Client" // @Success 200 {object} types.InternalGetUserNotificationClientsResponse @@ -2087,7 +2087,7 @@ func (h *HandlerService) PublicGetUserNotificationClients(w http.ResponseWriter, // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." Enums(timestamp, event_type, node_address) // @Param search query string false "Search for TODO" // @Success 200 {object} types.InternalGetUserNotificationRocketPoolResponse @@ -2126,7 +2126,7 @@ func (h *HandlerService) PublicGetUserNotificationRocketPool(w http.ResponseWrit // @Tags Notifications // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(timestamp, event_type) // @Param search query string false "Search for TODO" // @Success 200 {object} types.InternalGetUserNotificationNetworksResponse @@ -2190,7 +2190,7 @@ func (h *HandlerService) PublicGetUserNotificationSettings(w http.ResponseWriter // @Tags Notification Settings // @Accept json // @Produce json -// @Param request body types.NotificationSettingsGeneral true "Notification settings" +// @Param request body types.NotificationSettingsGeneral true "Description TODO" // @Success 200 {object} types.InternalPutUserNotificationSettingsGeneralResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/settings/general [put] @@ -2211,7 +2211,6 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons checkMinMax(&v, req.MachineMemoryUsageThreshold, 0, 1, "machine_memory_usage_threshold") checkMinMax(&v, req.RocketPoolMaxCollateralThreshold, 0, 1, "rocket_pool_max_collateral_threshold") checkMinMax(&v, req.RocketPoolMinCollateralThreshold, 0, 1, "rocket_pool_min_collateral_threshold") - // TODO: check validity of clients if v.hasErrors() { handleErr(w, r, v) return @@ -2235,7 +2234,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsGeneral(w http.Respons // @Accept json // @Produce json // @Param network path string true "The networks name or chain ID." -// @Param request body types.NotificationSettingsNetwork true "Notification settings" +// @Param request body types.NotificationSettingsNetwork true "Description Todo" // @Success 200 {object} types.InternalPutUserNotificationSettingsNetworksResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/settings/networks/{network} [put] @@ -2280,7 +2279,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsNetworks(w http.Respon // @Accept json // @Produce json // @Param paired_device_id path string true "The paired device ID." 
-// @Param request body handlers.PublicPutUserNotificationSettingsPairedDevices.request true "Notification settings" +// @Param request body handlers.PublicPutUserNotificationSettingsPairedDevices.request true "Description TODO" // @Success 200 {object} types.InternalPutUserNotificationSettingsPairedDevicesResponse // @Failure 400 {object} types.ApiErrorResponse // @Router /users/me/notifications/settings/paired-devices/{paired_device_id} [put] @@ -2355,6 +2354,49 @@ func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w htt returnNoContent(w, r) } +// PublicPutUserNotificationSettingsClient godoc +// +// @Description Update client notification settings for the authenticated user. When a client is subscribed, notifications will be sent when a new version is available. +// @Security ApiKeyInHeader || ApiKeyInQuery +// @Tags Notification Settings +// @Accept json +// @Produce json +// @Param client_id path integer true "The ID of the client." +// @Param request body handlers.PublicPutUserNotificationSettingsClient.request true "`is_subscribed`: Set to `true` to subscribe to notifications; set to `false` to unsubscribe." 
+// @Success 200 {object} types.InternalPutUserNotificationSettingsClientResponse +// @Failure 400 {object} types.ApiErrorResponse +// @Router /users/me/notifications/settings/clients/{client_id} [put] +func (h *HandlerService) PublicPutUserNotificationSettingsClient(w http.ResponseWriter, r *http.Request) { + var v validationError + userId, err := GetUserIdByContext(r) + if err != nil { + handleErr(w, r, err) + return + } + type request struct { + IsSubscribed bool `json:"is_subscribed"` + } + var req request + if err := v.checkBody(&req, r); err != nil { + handleErr(w, r, err) + return + } + clientId := v.checkUint(mux.Vars(r)["client_id"], "client_id") + if v.hasErrors() { + handleErr(w, r, v) + return + } + data, err := h.dai.UpdateNotificationSettingsClients(r.Context(), userId, clientId, req.IsSubscribed) + if err != nil { + handleErr(w, r, err) + return + } + response := types.InternalPutUserNotificationSettingsClientResponse{ + Data: *data, + } + returnOk(w, r, response) +} + // PublicGetUserNotificationSettingsDashboards godoc // // @Description Get a list of notification settings for the dashboards of the authenticated user. @@ -2362,7 +2404,7 @@ func (h *HandlerService) PublicDeleteUserNotificationSettingsPairedDevices(w htt // @Tags Notification Settings // @Produce json // @Param cursor query string false "Return data for the given cursor value. Pass the `paging.next_cursor`` value of the previous response to navigate to forward, or pass the `paging.prev_cursor`` value of the previous response to navigate to backward." -// @Param limit query string false "The maximum number of results that may be returned." +// @Param limit query integer false "The maximum number of results that may be returned." // @Param sort query string false "The field you want to sort by. Append with `:desc` for descending order." 
Enums(dashboard_id, group_name) // @Param search query string false "Search for Dashboard, Group" // @Success 200 {object} types.InternalGetUserNotificationSettingsDashboardsResponse @@ -2402,7 +2444,7 @@ func (h *HandlerService) PublicGetUserNotificationSettingsDashboards(w http.Resp // @Accept json // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param group_id path integer true "The ID of the group." // @Param request body types.NotificationSettingsValidatorDashboard true "Notification settings" // @Success 200 {object} types.InternalPutUserNotificationSettingsValidatorDashboardResponse // @Failure 400 {object} types.ApiErrorResponse @@ -2441,7 +2483,7 @@ func (h *HandlerService) PublicPutUserNotificationSettingsValidatorDashboard(w h // @Accept json // @Produce json // @Param dashboard_id path string true "The ID of the dashboard." -// @Param group_id path string true "The ID of the group." +// @Param group_id path integer true "The ID of the group."
// @Param request body handlers.PublicPutUserNotificationSettingsAccountDashboard.request true "Notification settings" // @Success 200 {object} types.InternalPutUserNotificationSettingsAccountDashboardResponse // @Failure 400 {object} types.ApiErrorResponse diff --git a/backend/pkg/api/router.go b/backend/pkg/api/router.go index 16ab4c0b2..864cb69d8 100644 --- a/backend/pkg/api/router.go +++ b/backend/pkg/api/router.go @@ -329,6 +329,7 @@ func addNotificationRoutes(hs *handlers.HandlerService, publicRouter, internalRo {http.MethodPut, "/settings/networks/{network}", hs.PublicPutUserNotificationSettingsNetworks, hs.InternalPutUserNotificationSettingsNetworks}, {http.MethodPut, "/settings/paired-devices/{paired_device_id}", hs.PublicPutUserNotificationSettingsPairedDevices, hs.InternalPutUserNotificationSettingsPairedDevices}, {http.MethodDelete, "/settings/paired-devices/{paired_device_id}", hs.PublicDeleteUserNotificationSettingsPairedDevices, hs.InternalDeleteUserNotificationSettingsPairedDevices}, + {http.MethodPut, "/settings/clients/{client_id}", hs.PublicPutUserNotificationSettingsClient, hs.InternalPutUserNotificationSettingsClient}, {http.MethodGet, "/settings/dashboards", hs.PublicGetUserNotificationSettingsDashboards, hs.InternalGetUserNotificationSettingsDashboards}, {http.MethodPost, "/test-email", hs.PublicPostUserNotificationsTestEmail, hs.InternalPostUserNotificationsTestEmail}, {http.MethodPost, "/test-push", hs.PublicPostUserNotificationsTestPush, hs.InternalPostUserNotificationsTestPush}, diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 5d80efc89..4d512ff5a 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -162,12 +162,15 @@ type NotificationPairedDevice struct { } type InternalPutUserNotificationSettingsPairedDevicesResponse ApiDataResponse[NotificationPairedDevice] -type NotificationSettingsClients struct { +type NotificationSettingsClient struct { 
Id uint64 `json:"id"` Name string `json:"name"` Category string `json:"category"` IsSubscribed bool `json:"is_subscribed"` } + +type InternalPutUserNotificationSettingsClientResponse ApiDataResponse[NotificationSettingsClient] + type NotificationSettingsGeneral struct { DoNotDisturbTimestamp int64 `json:"do_not_disturb_timestamp"` // notifications are disabled until this timestamp IsEmailNotificationsEnabled bool `json:"is_email_notifications_enabled"` @@ -189,10 +192,10 @@ type NotificationSettingsGeneral struct { } type InternalPutUserNotificationSettingsGeneralResponse ApiDataResponse[NotificationSettingsGeneral] type NotificationSettings struct { - GeneralSettings NotificationSettingsGeneral `json:"general_settings"` - Networks []NotificationNetwork `json:"networks"` - PairedDevices []NotificationPairedDevice `json:"paired_devices"` - Clients []NotificationSettingsClients `json:"clients"` + GeneralSettings NotificationSettingsGeneral `json:"general_settings"` + Networks []NotificationNetwork `json:"networks"` + PairedDevices []NotificationPairedDevice `json:"paired_devices"` + Clients []NotificationSettingsClient `json:"clients"` } type InternalGetUserNotificationSettingsResponse ApiDataResponse[NotificationSettings] From 1b7d591b99887e569067d4080006ffb59ed28022 Mon Sep 17 00:00:00 2001 From: LUCCA DUKIC <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:30:19 +0200 Subject: [PATCH 141/187] (BEDS-452) typescript conversion --- frontend/types/api/notifications.ts | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index 0d9f53898..57a2900d4 100644 --- a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -156,6 +156,13 @@ export interface NotificationPairedDevice { is_notifications_enabled: boolean; } export type InternalPutUserNotificationSettingsPairedDevicesResponse = ApiDataResponse; +export interface 
NotificationSettingsClient { + id: number /* uint64 */; + name: string; + category: string; + is_subscribed: boolean; +} +export type InternalPutUserNotificationSettingsClientResponse = ApiDataResponse<NotificationSettingsClient>; export interface NotificationSettingsGeneral { do_not_disturb_timestamp: number /* int64 */; // notifications are disabled until this timestamp is_email_notifications_enabled: boolean; @@ -167,7 +174,6 @@ machine_cpu_usage_threshold: number /* float64 */; is_machine_memory_usage_subscribed: boolean; machine_memory_usage_threshold: number /* float64 */; - subscribed_clients: string[]; is_rocket_pool_new_reward_round_subscribed: boolean; is_rocket_pool_max_collateral_subscribed: boolean; rocket_pool_max_collateral_threshold: number /* float64 */; @@ -179,6 +185,7 @@ export interface NotificationSettings { general_settings: NotificationSettingsGeneral; networks: NotificationNetwork[]; paired_devices: NotificationPairedDevice[]; + clients: NotificationSettingsClient[]; } export type InternalGetUserNotificationSettingsResponse = ApiDataResponse<NotificationSettings>; export interface NotificationSettingsValidatorDashboard { From f29cd091cd21b671a4ba63d7b94b83e10e5322ad Mon Sep 17 00:00:00 2001 From: LUCCA DUKIC <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:48:14 +0200 Subject: [PATCH 142/187] (BEDS-452) defeat frontend typecheck --- frontend/utils/mock.ts | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/frontend/utils/mock.ts b/frontend/utils/mock.ts index f942d8fbb..0f5e8bc6e 100644 --- a/frontend/utils/mock.ts +++ b/frontend/utils/mock.ts @@ -443,6 +443,32 @@ export function simulateAPIresponseAboutNetworkList(): ApiDataResponse< export function mockManageNotificationsGeneral(): InternalGetUserNotificationSettingsResponse { return { data: { + clients: [ + { + category: 'EL', + id: 1, + is_subscribed: true, + name: 'EL Client 1', + }, + { + category: 'CL', + id: 2, +
is_subscribed: false, + name: 'CL Client 1', + }, + { + category: 'other', + id: 3, + is_subscribed: true, + name: 'Other Clien 1', + }, + { + category: 'other', + id: 4, + is_subscribed: false, + name: 'Other Clien 2', + }, + ], general_settings: { do_not_disturb_timestamp: 9000, is_email_notifications_enabled: false, @@ -459,7 +485,6 @@ export function mockManageNotificationsGeneral(): InternalGetUserNotificationSet machine_storage_usage_threshold: 80, rocket_pool_max_collateral_threshold: 29823, rocket_pool_min_collateral_threshold: 123, - subscribed_clients: [], }, networks: [], paired_devices: [ From 17918e6e95e069471977143416185e184619abe0 Mon Sep 17 00:00:00 2001 From: Lucca <109136188+LuccaBitfly@users.noreply.github.com> Date: Thu, 19 Sep 2024 13:12:31 +0200 Subject: [PATCH 143/187] (BEDS-477) add url to clients notification table (#872) --- backend/pkg/api/types/notifications.go | 1 + frontend/types/api/notifications.ts | 1 + 2 files changed, 2 insertions(+) diff --git a/backend/pkg/api/types/notifications.go b/backend/pkg/api/types/notifications.go index 27bf585be..af31fd56a 100644 --- a/backend/pkg/api/types/notifications.go +++ b/backend/pkg/api/types/notifications.go @@ -111,6 +111,7 @@ type InternalGetUserNotificationMachinesResponse ApiPagingResponse[NotificationM type NotificationClientsTableRow struct { ClientName string `json:"client_name"` Version string `json:"version"` + Url string `json:"url"` Timestamp int64 `json:"timestamp"` } diff --git a/frontend/types/api/notifications.ts b/frontend/types/api/notifications.ts index 0d9f53898..1ff93b79b 100644 --- a/frontend/types/api/notifications.ts +++ b/frontend/types/api/notifications.ts @@ -107,6 +107,7 @@ export type InternalGetUserNotificationMachinesResponse = ApiPagingResponse; From e0f0a76c6a5c0c6ec2f3eafe0e5c3bff79c069a0 Mon Sep 17 00:00:00 2001 From: benji-bitfly Date: Fri, 20 Sep 2024 08:48:40 +0200 Subject: [PATCH 144/187] feat: add `notifications overview` See: BEDS-335 --- 
frontend/components/BcFeatureFlag.vue | 35 +++ frontend/components/bc/BcButtonText.vue | 19 ++ .../notifications/NotificationsOverview.vue | 247 ++++++++++++++++++ frontend/locales/en.json | 30 ++- frontend/pages/notifications.vue | 4 +- .../useNotificationsOverviewStore.ts | 35 +++ frontend/types/customFetch.ts | 6 + 7 files changed, 374 insertions(+), 2 deletions(-) create mode 100644 frontend/components/BcFeatureFlag.vue create mode 100644 frontend/components/bc/BcButtonText.vue create mode 100644 frontend/components/notifications/NotificationsOverview.vue create mode 100644 frontend/stores/notifications/useNotificationsOverviewStore.ts diff --git a/frontend/components/BcFeatureFlag.vue b/frontend/components/BcFeatureFlag.vue new file mode 100644 index 000000000..53c6c6673 --- /dev/null +++ b/frontend/components/BcFeatureFlag.vue @@ -0,0 +1,35 @@ + + + + + diff --git a/frontend/components/bc/BcButtonText.vue b/frontend/components/bc/BcButtonText.vue new file mode 100644 index 000000000..60eb5acc1 --- /dev/null +++ b/frontend/components/bc/BcButtonText.vue @@ -0,0 +1,19 @@ + + + + + diff --git a/frontend/components/notifications/NotificationsOverview.vue b/frontend/components/notifications/NotificationsOverview.vue new file mode 100644 index 000000000..7eb583cd4 --- /dev/null +++ b/frontend/components/notifications/NotificationsOverview.vue @@ -0,0 +1,247 @@ + + + + + diff --git a/frontend/locales/en.json b/frontend/locales/en.json index 302cfa091..9a989b149 100644 --- a/frontend/locales/en.json +++ b/frontend/locales/en.json @@ -20,6 +20,7 @@ }, "common": { "account": "Account | Accounts", + "active": "Active", "address": "Address | Addresses", "age": "Age", "and_more": "and {count} more", @@ -34,6 +35,7 @@ "deselected": "Not selected", "earned": "Earned", "edit": "Edit", + "email": "Email", "ens_address": "ENS address | ENS addresses", "epoch": "Epoch | Epochs", "erc20token": "ERC-20 Token | ERC-20 Tokens", @@ -45,6 +47,7 @@ "graffiti": "Graffiti | Graffitis", 
"id": "ID", "in_day": "In one day | In {count} days", + "inactive": "Inactive", "index": "Index", "last_x": "Last {duration}", "live": "Live", @@ -68,8 +71,12 @@ "true": "True", "tx_batch": "Tx Batch | Tx Batches", "unavailable": "Unavailable", + "units": { + "per_day": "per day" + }, "upcoming": "Upcoming", - "validator": "Validator | Validators" + "validator": "Validator | Validators", + "webhook": "Webhook" }, "cookies": { "accept_all": "Accept All", @@ -692,6 +699,27 @@ "subscriptions": "Network ({count} Subscription) | Validators ({count} Subscriptions)" } }, + "overview": { + "email_activate": "Click here to activate Email notifications", + "email_tooltip": "Your current limit is { limit } emails per day. Your email limit resets in { hours } hours. Upgrade to premium for more.", + "headers": { + "account_groups": "Most notified account groups", + "email_notifications": "Email Notifications", + "most_notifications_24h": "Most Notifications in 24h", + "most_notifications_30d": "Most Notifications in 30d", + "push_notifications": "Push Notifications", + "validator_groups": "Most notified validator groups" + }, + "notifications_activate_premium": { + "_link": "here", + "template": "Click {_link} to activate" + }, + "notifications_download_app": { + "_link": "mobile app", + "template": "Download the {_link} to activate" + }, + "push": "Push" + }, "subscriptions": { "accounts": { "all": { diff --git a/frontend/pages/notifications.vue b/frontend/pages/notifications.vue index 5d591e793..d1701d51d 100644 --- a/frontend/pages/notifications.vue +++ b/frontend/pages/notifications.vue @@ -84,7 +84,9 @@ const openManageNotifications = () => { { + const data = ref() + return { data } +}) + +export function useNotificationsDashboardOverviewStore() { + const { fetch } = useCustomFetch() + const { data: overview } = storeToRefs(notificationsOverviewStore()) + + async function refreshOverview() { + try { + const res = await fetch( + API_PATH.NOTIFICATIONS_OVERVIEW, + ) + 
overview.value = res.data + + return overview.value + } + catch (e) { + overview.value = undefined + throw e + } + } + + return { + overview, + refreshOverview, + } +} diff --git a/frontend/types/customFetch.ts b/frontend/types/customFetch.ts index 18b2d3ea1..b7cf641b4 100644 --- a/frontend/types/customFetch.ts +++ b/frontend/types/customFetch.ts @@ -45,6 +45,7 @@ export enum API_PATH { NOTIFICATIONS_MACHINE = '/notifications/machines', NOTIFICATIONS_MANAGEMENT_GENERAL = '/notifications/managementGeneral', NOTIFICATIONS_NETWORK = '/notifications/networks', + NOTIFICATIONS_OVERVIEW = '/notifications', NOTIFICATIONS_TEST_EMAIL = '/notifications/test_email', NOTIFICATIONS_TEST_PUSH = '/notifications/test_push', NOTIFICATIONS_TEST_WEBHOOK = '/users/me/notifications/test-webhook', @@ -298,6 +299,11 @@ export const mapping: Record = { [API_PATH.NOTIFICATIONS_NETWORK]: { path: '/users/me/notifications/networks', }, + [API_PATH.NOTIFICATIONS_OVERVIEW]: { + method: 'GET', + mock: false, + path: '/users/me/notifications', + }, [API_PATH.NOTIFICATIONS_TEST_EMAIL]: { method: 'POST', mock: true, From cdffa1ef8f5c23a19200c6ea1e2108494f1d2ff5 Mon Sep 17 00:00:00 2001 From: MarcelBitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Mon, 9 Sep 2024 18:08:28 +0200 Subject: [PATCH 145/187] chore(eslint): add rule to sort `conventional commit scopes` --- frontend/eslint.config.mjs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/frontend/eslint.config.mjs b/frontend/eslint.config.mjs index 08474dd9e..ec277885d 100644 --- a/frontend/eslint.config.mjs +++ b/frontend/eslint.config.mjs @@ -114,6 +114,16 @@ export default withNuxt({ ...eslintPluginJsonc.configs['flat/recommended-with-json'], { rules: { + 'jsonc/sort-array-values': [ + 'warn', + { + order: { + natural: true, + type: 'asc', + }, + pathPattern: 'conventionalCommits.scopes', + }, + ], 'jsonc/sort-keys': [ 'error', 'asc', From 9c38c2f91db699e344f13e9c89efc8dd6172511d Mon Sep 17 00:00:00 2001 From: 
MarcelBitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Fri, 20 Sep 2024 12:47:10 +0200 Subject: [PATCH 146/187] refactor(feature-flags): manage via `composable` With a `composable` `feature flags` can be used in nearly every `nuxt function`. --- frontend/.vscode/settings.json | 10 ++++---- frontend/components/BcFeatureFlag.vue | 29 ++++------------------ frontend/composables/useFeatureFlag.ts | 33 ++++++++++++++++++++++++++ frontend/types/feature-flags.ts | 6 +++++ 4 files changed, 49 insertions(+), 29 deletions(-) create mode 100644 frontend/composables/useFeatureFlag.ts create mode 100644 frontend/types/feature-flags.ts diff --git a/frontend/.vscode/settings.json b/frontend/.vscode/settings.json index a97dbcf2a..e5ba8997f 100644 --- a/frontend/.vscode/settings.json +++ b/frontend/.vscode/settings.json @@ -1,17 +1,19 @@ { "conventionalCommits.scopes": [ + "DashboardChartSummaryChartFilter", + "DashboardGroupManagementModal", + "DashboardValidatorManagmentModal", "checkout", "ci", "customFetch", - "DashboardChartSummaryChartFilter", - "DashboardGroupManagementModal", "eslint", + "feature-flags", "git", "i18n", "mainHeader", + "notifications", "qrCode", - "vscode", - "DashboardValidatorManagmentModal" + "vscode" ], "editor.codeActionsOnSave": { "source.fixAll.eslint": "always" diff --git a/frontend/components/BcFeatureFlag.vue b/frontend/components/BcFeatureFlag.vue index 53c6c6673..5d69eb435 100644 --- a/frontend/components/BcFeatureFlag.vue +++ b/frontend/components/BcFeatureFlag.vue @@ -1,35 +1,14 @@ diff --git a/frontend/composables/useFeatureFlag.ts b/frontend/composables/useFeatureFlag.ts new file mode 100644 index 000000000..6307dfaf2 --- /dev/null +++ b/frontend/composables/useFeatureFlag.ts @@ -0,0 +1,33 @@ +import { warn } from 'vue' +import type { FeatureFlag } from '~/types/feature-flags' + +export const useFeatureFlag = () => { + type Environment = 'development' | 'production' | 'staging' + + const currentEnvironment = 
useRuntimeConfig().public.deploymentType as Environment + if (!currentEnvironment) { + warn('Environment variable `deploymentType` is not set.') + } + + const staging: FeatureFlag[] = [ 'feature-notifications' ] + const development: FeatureFlag[] + = [ + ...staging, + 'feature-account_dashboards', + 'feature-user_settings', + ] + const featureCatalog: Record = { + development, + production: [], + staging, + } + + const activeFeatures = featureCatalog[currentEnvironment] + + const has = (feature: FeatureFlag) => activeFeatures.includes(feature) + + return { + activeFeatures, + has, + } +} diff --git a/frontend/types/feature-flags.ts b/frontend/types/feature-flags.ts new file mode 100644 index 000000000..90c2d8d4f --- /dev/null +++ b/frontend/types/feature-flags.ts @@ -0,0 +1,6 @@ +export type FeatureFlag = (typeof FEATURE_FLAGS)[number] +const FEATURE_FLAGS = [ + 'feature-account_dashboards', + 'feature-notifications', + 'feature-user_settings', +] as const From 789fecc2951c8a95fda984d65640d3f5836ad34f Mon Sep 17 00:00:00 2001 From: MarcelBitfly <174338434+marcel-bitfly@users.noreply.github.com> Date: Fri, 20 Sep 2024 13:01:00 +0200 Subject: [PATCH 147/187] feat(notifications): unlock `route` for `staging` --- frontend/components/bc/header/MegaMenu.vue | 1677 ++++++++--------- .../components/dashboard/DashboardHeader.vue | 11 +- frontend/composables/useFeatureFlag.ts | 3 +- frontend/middleware/redirect.global.ts | 11 +- 4 files changed, 820 insertions(+), 882 deletions(-) diff --git a/frontend/components/bc/header/MegaMenu.vue b/frontend/components/bc/header/MegaMenu.vue index abb846647..9af0b4b09 100644 --- a/frontend/components/bc/header/MegaMenu.vue +++ b/frontend/components/bc/header/MegaMenu.vue @@ -1,61 +1,10 @@ +