From 63884842abefc1d2f11df048a858afcd06e436e6 Mon Sep 17 00:00:00 2001
From: benji-bitfly <174338202+benji-bitfly@users.noreply.github.com>
Date: Wed, 13 Nov 2024 11:04:33 +0100
Subject: [PATCH 1/6] refactor: remove `network average`
See: BEDS-641
---
...art.vue => DashboardChartSummaryChart.vue} | 8 ++---
.../DashboardChartSummaryChartFilter.vue | 30 +++++++++----------
frontend/types/dashboard/summary.ts | 2 +-
3 files changed, 20 insertions(+), 20 deletions(-)
rename frontend/components/dashboard/chart/{SummaryChart.vue => DashboardChartSummaryChart.vue} (98%)
diff --git a/frontend/components/dashboard/chart/SummaryChart.vue b/frontend/components/dashboard/chart/DashboardChartSummaryChart.vue
similarity index 98%
rename from frontend/components/dashboard/chart/SummaryChart.vue
rename to frontend/components/dashboard/chart/DashboardChartSummaryChart.vue
index 5b991087f..bbddf293e 100644
--- a/frontend/components/dashboard/chart/SummaryChart.vue
+++ b/frontend/components/dashboard/chart/DashboardChartSummaryChart.vue
@@ -26,7 +26,7 @@ import { formatTsToTime } from '~/utils/format'
import { API_PATH } from '~/types/customFetch'
import {
type AggregationTimeframe,
- SUMMARY_CHART_GROUP_NETWORK_AVERAGE,
+ // SUMMARY_CHART_GROUP_NETWORK_AVERAGE,
SUMMARY_CHART_GROUP_TOTAL,
type SummaryChartFilter,
} from '~/types/dashboard/summary'
@@ -198,9 +198,9 @@ const loadData = async () => {
if (element.id === SUMMARY_CHART_GROUP_TOTAL) {
name = $t('dashboard.validator.summary.chart.total')
}
- else if (element.id === SUMMARY_CHART_GROUP_NETWORK_AVERAGE) {
- name = $t('dashboard.validator.summary.chart.average')
- }
+ // else if (element.id === SUMMARY_CHART_GROUP_NETWORK_AVERAGE) {
+ // name = $t('dashboard.validator.summary.chart.average')
+ // }
else {
name = getGroupLabel($t, element.id, groups.value, allGroups)
}
diff --git a/frontend/components/dashboard/chart/DashboardChartSummaryChartFilter.vue b/frontend/components/dashboard/chart/DashboardChartSummaryChartFilter.vue
index 8d301dc47..735028a9e 100644
--- a/frontend/components/dashboard/chart/DashboardChartSummaryChartFilter.vue
+++ b/frontend/components/dashboard/chart/DashboardChartSummaryChartFilter.vue
@@ -5,7 +5,7 @@ import {
AggregationTimeframes,
type EfficiencyType,
EfficiencyTypes,
- SUMMARY_CHART_GROUP_NETWORK_AVERAGE,
+ // SUMMARY_CHART_GROUP_NETWORK_AVERAGE,
SUMMARY_CHART_GROUP_TOTAL,
type SummaryChartFilter,
} from '~/types/dashboard/summary'
@@ -52,10 +52,10 @@ const total = ref(
!chartFilter.value.initialised
|| chartFilter.value.groupIds.includes(SUMMARY_CHART_GROUP_TOTAL),
)
-const average = ref(
- !chartFilter.value.initialised
- || chartFilter.value.groupIds.includes(SUMMARY_CHART_GROUP_NETWORK_AVERAGE),
-)
+// const average = ref(
+// !chartFilter.value.initialised
+// || chartFilter.value.groupIds.includes(SUMMARY_CHART_GROUP_NETWORK_AVERAGE),
+// )
const groups = computed(() => {
if (!overview.value?.groups) {
return []
@@ -80,20 +80,20 @@ watch(
[
selectedGroups,
total,
- average,
+ // average,
],
([
list,
t,
- a,
+ // a,
]) => {
const groupIds: number[] = [ ...list ]
if (t) {
groupIds.push(SUMMARY_CHART_GROUP_TOTAL)
}
- if (a) {
- groupIds.push(SUMMARY_CHART_GROUP_NETWORK_AVERAGE)
- }
+ // if (a) {
+ // groupIds.push(SUMMARY_CHART_GROUP_NETWORK_AVERAGE)
+ // }
chartFilter.value.groupIds = groupIds
chartFilter.value.initialised = true
},
@@ -120,9 +120,9 @@ const selectedLabel = computed(() => {
'asc',
)
- if (average.value) {
- list.splice(0, 0, $t('dashboard.validator.summary.chart.average'))
- }
+ // if (average.value) {
+ // list.splice(0, 0, $t('dashboard.validator.summary.chart.average'))
+ // }
if (total.value) {
list.splice(0, 0, $t('dashboard.validator.summary.chart.total'))
}
@@ -164,12 +164,12 @@ const selectedLabel = computed(() => {
$t("dashboard.validator.summary.chart.total")
}}
-
+
{{ $t("dashboard.group.selection.all") }}
diff --git a/frontend/types/dashboard/summary.ts b/frontend/types/dashboard/summary.ts
index a632bb3d1..a0c675f43 100644
--- a/frontend/types/dashboard/summary.ts
+++ b/frontend/types/dashboard/summary.ts
@@ -80,7 +80,7 @@ export type SummaryTableVisibility = {
}
export const SUMMARY_CHART_GROUP_TOTAL = -1
-export const SUMMARY_CHART_GROUP_NETWORK_AVERAGE = -2
+// export const SUMMARY_CHART_GROUP_NETWORK_AVERAGE = -2
export type AggregationTimeframe = keyof ChartHistorySeconds
export const AggregationTimeframes: AggregationTimeframe[] = [
From 223b3cdb364cbe2c15072331c553ecae1de3701e Mon Sep 17 00:00:00 2001
From: Patrick
Date: Thu, 14 Nov 2024 11:19:57 +0100
Subject: [PATCH 2/6] chore(rocketpool-exporter): improve logging (#1128)
Co-authored-by: Patrick Pfeiffer <306324+guybrush@users.noreply.github.com>
---
backend/pkg/exporter/modules/rocketpool.go | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/backend/pkg/exporter/modules/rocketpool.go b/backend/pkg/exporter/modules/rocketpool.go
index f6a5d4183..62ca5cf73 100644
--- a/backend/pkg/exporter/modules/rocketpool.go
+++ b/backend/pkg/exporter/modules/rocketpool.go
@@ -269,10 +269,13 @@ func (rp *RocketpoolExporter) DownloadMissingRewardTrees() error {
}
proofWrapper, err := getRewardsData(bytes)
+ if err != nil {
+ return fmt.Errorf("can not parse reward file %v, error: %w", missingInterval.Index, err)
+ }
merkleRootFromFile := common.HexToHash(proofWrapper.MerkleRoot)
if missingInterval.MerkleRoot != merkleRootFromFile {
- return fmt.Errorf("invalid merkle root value : %w", err)
+ return fmt.Errorf("invalid merkle root value: %s != %s", missingInterval.MerkleRoot, merkleRootFromFile)
}
rp.RocketpoolRewardTreesDownloadQueue = append(rp.RocketpoolRewardTreesDownloadQueue, RocketpoolRewardTreeDownloadable{
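Note on the pattern above: the parse error is now checked immediately and wrapped with %w, while the merkle-root mismatch is reported as a plain value comparison, since at that point there is no underlying error to wrap (the old code passed a nil err to %w). A minimal, self-contained sketch of the same pattern, with illustrative names rather than the exporter's real types:

    package main

    import (
        "errors"
        "fmt"
    )

    // parseRewardFile is an illustrative stand-in for getRewardsData; the real
    // function and types live in the exporter package.
    func parseRewardFile(raw []byte) (string, error) {
        if len(raw) == 0 {
            return "", errors.New("empty file")
        }
        return "0xabc", nil // pretend this is the merkle root read from the file
    }

    func checkRewardFile(raw []byte, expectedRoot string) error {
        rootFromFile, err := parseRewardFile(raw)
        if err != nil {
            // A real underlying error exists, so wrap it with %w to keep the chain.
            return fmt.Errorf("can not parse reward file: %w", err)
        }
        if rootFromFile != expectedRoot {
            // No underlying error here: report the mismatching values directly;
            // the old code wrapped a nil err with %w and lost both values.
            return fmt.Errorf("invalid merkle root value: %s != %s", expectedRoot, rootFromFile)
        }
        return nil
    }

    func main() {
        fmt.Println(checkRewardFile(nil, "0xabc"))            // parse error path
        fmt.Println(checkRewardFile([]byte("data"), "0xdef")) // mismatch path
    }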
From 00c380dae093b3622b07202f6e3656b7ac33b307 Mon Sep 17 00:00:00 2001
From: peter <1674920+peterbitfly@users.noreply.github.com>
Date: Thu, 14 Nov 2024 12:24:38 +0000
Subject: [PATCH 3/6] feat(notifications): add metadata to client update
notifications
---
backend/pkg/commons/ethclients/ethclients.go | 8 +++--
backend/pkg/notification/collection.go | 2 ++
backend/pkg/notification/types.go | 31 ++++----------------
3 files changed, 12 insertions(+), 29 deletions(-)
diff --git a/backend/pkg/commons/ethclients/ethclients.go b/backend/pkg/commons/ethclients/ethclients.go
index e4f998e88..1fbf2989f 100644
--- a/backend/pkg/commons/ethclients/ethclients.go
+++ b/backend/pkg/commons/ethclients/ethclients.go
@@ -40,8 +40,10 @@ type gitAPIResponse struct {
}
type clientUpdateInfo struct {
- Name string
- Date time.Time
+ Name string
+ Url string
+ Version string
+ Date time.Time
}
type EthClients struct {
@@ -177,7 +179,7 @@ func prepareEthClientData(repo string, name string, curTime time.Time) (string,
timeDiff := (curTime.Sub(rTime).Hours() / 24.0)
if timeDiff < 1 { // add recent releases for notification collector to be collected
- update := clientUpdateInfo{Name: name, Date: rTime}
+ update := clientUpdateInfo{Name: name, Date: rTime, Url: client.HTMLURL, Version: client.TagName}
bannerClients = append(bannerClients, update)
}
return client.Name, rTime.Unix()
diff --git a/backend/pkg/notification/collection.go b/backend/pkg/notification/collection.go
index 3be7b9d39..abb80de2b 100644
--- a/backend/pkg/notification/collection.go
+++ b/backend/pkg/notification/collection.go
@@ -1403,6 +1403,8 @@ func collectEthClientNotifications(notificationsByUserID types.NotificationsPerU
DashboardGroupName: sub.DashboardGroupName,
},
EthClient: client.Name,
+ Url: client.Url,
+ Version: client.Version,
}
notificationsByUserID.AddNotification(n)
metrics.NotificationsCollected.WithLabelValues(string(n.GetEventName())).Inc()
diff --git a/backend/pkg/notification/types.go b/backend/pkg/notification/types.go
index 30efe1414..5749e9d35 100644
--- a/backend/pkg/notification/types.go
+++ b/backend/pkg/notification/types.go
@@ -423,6 +423,8 @@ type EthClientNotification struct {
types.NotificationBaseImpl
EthClient string
+ Url string
+ Version string
}
func (n *EthClientNotification) GetEntitiyId() string {
@@ -430,37 +432,14 @@ func (n *EthClientNotification) GetEntitiyId() string {
}
func (n *EthClientNotification) GetInfo(format types.NotificationFormat) string {
- clientUrls := map[string]string{
- "Geth": "https://github.com/ethereum/go-ethereum/releases",
- "Nethermind": "https://github.com/NethermindEth/nethermind/releases",
- "Teku": "https://github.com/ConsenSys/teku/releases",
- "Prysm": "https://github.com/prysmaticlabs/prysm/releases",
- "Nimbus": "https://github.com/status-im/nimbus-eth2/releases",
- "Lighthouse": "https://github.com/sigp/lighthouse/releases",
- "Erigon": "https://github.com/erigontech/erigon/releases",
- "Rocketpool": "https://github.com/rocket-pool/smartnode-install/releases",
- "MEV-Boost": "https://github.com/flashbots/mev-boost/releases",
- "Lodestar": "https://github.com/chainsafe/lodestar/releases",
- }
- defaultUrl := "https://beaconcha.in/ethClients"
-
switch format {
case types.NotifciationFormatHtml:
- generalPart := fmt.Sprintf(`A new version for %s is available.`, n.EthClient)
- url := clientUrls[n.EthClient]
- if url == "" {
- url = defaultUrl
- }
- return generalPart + " " + url
+ generalPart := fmt.Sprintf(`A new version %s for %s is available.`, n.Version, n.EthClient)
+ return generalPart + " " + n.Url
case types.NotifciationFormatText:
return fmt.Sprintf(`A new version for %s is available.`, n.EthClient)
case types.NotifciationFormatMarkdown:
- url := clientUrls[n.EthClient]
- if url == "" {
- url = defaultUrl
- }
-
- generalPart := fmt.Sprintf(`A new version for [%s](%s) is available.`, n.EthClient, url)
+ generalPart := fmt.Sprintf(`A new version for [%s](%s) is available.`, n.EthClient, n.Url)
return generalPart
}
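The change above attaches the release URL and version at collection time (taken from the GitHub release's HTMLURL and TagName) instead of resolving them from a hardcoded per-client map when the notification is rendered. A trimmed-down sketch of that data flow, using hypothetical stand-in types and example values rather than the real ones:

    package main

    import "fmt"

    type release struct { // subset of the GitHub release fields used here
        TagName string
        HTMLURL string
    }

    type clientUpdateInfo struct { // now carries Url and Version next to Name
        Name    string
        Url     string
        Version string
    }

    type ethClientNotification struct { // enriched at collection time
        EthClient string
        Url       string
        Version   string
    }

    func (n ethClientNotification) markdown() string {
        // No hardcoded per-client URL map and no default-URL fallback any more:
        // the link comes straight from the release that triggered the notification.
        return fmt.Sprintf("A new version for [%s](%s) is available.", n.EthClient, n.Url)
    }

    func main() {
        rel := release{TagName: "v1.2.3", HTMLURL: "https://github.com/sigp/lighthouse/releases"}
        update := clientUpdateInfo{Name: "Lighthouse", Url: rel.HTMLURL, Version: rel.TagName}
        n := ethClientNotification{EthClient: update.Name, Url: update.Url, Version: update.Version}
        fmt.Println(n.markdown())
    }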
From a0400290a38e9e9e12cf258bb0575a651dfa22f1 Mon Sep 17 00:00:00 2001
From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com>
Date: Tue, 12 Nov 2024 12:54:10 +0100
Subject: [PATCH 4/6] feat(BEDS-880): improve queries when adding validators to
the dashboard
- Added batch behaviour and simpler queries (see the sketch after this list)
- Fixed a missing tx commit
- Added an insert count to the standard insert
- Get the count of inserted validators
- Get the inserted count for each query
- Get the total number of rows, not just the inserted ones
- Removed test comments
- Returned the filled result
- Changed to return pubkeys instead of a count
- Changed the return type from pubkey to index
- chore: convert ts types
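The batching relies on Postgres's limit of 65535 bind parameters per statement (the parameter count in the wire protocol is a 16-bit field); with three columns per row that allows 65535 / 3 = 21845 rows per INSERT. A minimal sketch of that batching loop, with a hypothetical flush callback standing in for the goqu INSERT ... ON CONFLICT statement executed inside the open transaction:

    package main

    import "fmt"

    // insertInBatches sketches the batching loop; flush is a hypothetical
    // stand-in for building and executing one batched INSERT statement.
    func insertInBatches(validators []uint64, flush func(rows []uint64) error) error {
        const numArgs = 3                 // dashboard_id, group_id, validator_index
        const batchSize = 65535 / numArgs // 21845 rows per statement
        var pending []uint64
        for i, v := range validators {
            pending = append(pending, v)
            if len(pending) >= batchSize || i == len(validators)-1 {
                if err := flush(pending); err != nil {
                    return fmt.Errorf("error inserting batch: %w", err)
                }
                pending = pending[:0]
            }
        }
        return nil
    }

    func main() {
        total := 0
        _ = insertInBatches(make([]uint64, 50000), func(rows []uint64) error {
            total += len(rows)
            fmt.Println("flushing", len(rows), "rows")
            return nil
        })
        fmt.Println("inserted", total, "rows in total")
    }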
---
backend/pkg/api/data_access/vdb_management.go | 280 +++++++++---------
backend/pkg/api/types/validator_dashboard.go | 4 +-
frontend/types/api/validator_dashboard.ts | 2 +-
3 files changed, 145 insertions(+), 141 deletions(-)
diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go
index e73fd22c0..e7c1018ce 100644
--- a/backend/pkg/api/data_access/vdb_management.go
+++ b/backend/pkg/api/data_access/vdb_management.go
@@ -6,7 +6,6 @@ import (
"encoding/hex"
"fmt"
"math/big"
- "slices"
"sort"
"strconv"
"strings"
@@ -836,83 +835,68 @@ func (d *DataAccessService) GetValidatorDashboardGroupExists(ctx context.Context
}
func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, validators []t.VDBValidator) ([]t.VDBPostValidatorsData, error) {
+ result := []t.VDBPostValidatorsData{}
+
if len(validators) == 0 {
// No validators to add
return nil, nil
}
- pubkeys := []struct {
- ValidatorIndex t.VDBValidator `db:"validatorindex"`
- Pubkey []byte `db:"pubkey"`
- }{}
-
- addedValidators := []struct {
- ValidatorIndex t.VDBValidator `db:"validator_index"`
- GroupId uint64 `db:"group_id"`
- }{}
-
- // Query to find the pubkey for each validator index
- pubkeysQuery := `
- SELECT
- validatorindex,
- pubkey
- FROM validators
- WHERE validatorindex = ANY($1)
- `
-
- // Query to add the validators to the dashboard and group
- addValidatorsQuery := `
- INSERT INTO users_val_dashboards_validators (dashboard_id, group_id, validator_index)
- VALUES
- `
-
- for idx := range validators {
- addValidatorsQuery += fmt.Sprintf("($1, $2, $%d), ", idx+3)
- }
- addValidatorsQuery = addValidatorsQuery[:len(addValidatorsQuery)-2] // remove trailing comma
-
- // If a validator is already in the dashboard, update the group
- // If the validator is already in that group nothing changes but we will include it in the result anyway
- addValidatorsQuery += `
- ON CONFLICT (dashboard_id, validator_index) DO UPDATE SET
- dashboard_id = EXCLUDED.dashboard_id,
- group_id = EXCLUDED.group_id,
- validator_index = EXCLUDED.validator_index
- RETURNING validator_index, group_id
- `
-
- // Find all the pubkeys
- err := d.alloyReader.SelectContext(ctx, &pubkeys, pubkeysQuery, pq.Array(validators))
+ tx, err := d.userWriter.BeginTxx(ctx, nil)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("error starting db transactions to insert validators for a dashboard: %w", err)
}
+ defer utils.Rollback(tx)
- // Add all the validators to the dashboard and group
- addValidatorsArgsIntf := []interface{}{dashboardId, groupId}
- for _, validatorIndex := range validators {
- addValidatorsArgsIntf = append(addValidatorsArgsIntf, validatorIndex)
- }
- err = d.alloyWriter.SelectContext(ctx, &addedValidators, addValidatorsQuery, addValidatorsArgsIntf...)
- if err != nil {
- return nil, err
- }
+ numArgs := 3
+ batchSize := 65535 / numArgs // max 65535 params per batch, since postgres uses int16 for binding input params
+ batchIdx, allIdx := 0, 0
+ var validatorsToInsert []goqu.Record
+ for _, validatorIdx := range validators {
+ validatorsToInsert = append(validatorsToInsert,
+ goqu.Record{"dashboard_id": dashboardId, "group_id": groupId, "validator_index": validatorIdx})
+
+ batchIdx++
+ allIdx++
+
+ if batchIdx >= batchSize || allIdx >= len(validators) {
+ insertDs := goqu.Dialect("postgres").
+ Insert("users_val_dashboards_validators").
+ Cols("dashboard_id", "group_id", "validator_index").
+ Rows(validatorsToInsert).
+ OnConflict(goqu.DoUpdate(
+ "dashboard_id, validator_index",
+ goqu.Record{
+ "dashboard_id": goqu.L("EXCLUDED.dashboard_id"),
+ "group_id": goqu.L("EXCLUDED.group_id"),
+ "validator_index": goqu.L("EXCLUDED.validator_index"),
+ },
+ ))
+
+ query, args, err := insertDs.Prepared(true).ToSQL()
+ if err != nil {
+ return nil, fmt.Errorf("error preparing query: %w", err)
+ }
- // Combine the pubkeys and group ids for the result
- pubkeysMap := make(map[t.VDBValidator]string, len(pubkeys))
- for _, pubKeyInfo := range pubkeys {
- pubkeysMap[pubKeyInfo.ValidatorIndex] = fmt.Sprintf("%#x", pubKeyInfo.Pubkey)
+ _, err = tx.ExecContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+
+ batchIdx = 0
+ validatorsToInsert = validatorsToInsert[:0]
+ }
}
- addedValidatorsMap := make(map[t.VDBValidator]uint64, len(addedValidators))
- for _, addedValidatorInfo := range addedValidators {
- addedValidatorsMap[addedValidatorInfo.ValidatorIndex] = addedValidatorInfo.GroupId
+ err = tx.Commit()
+ if err != nil {
+ return nil, fmt.Errorf("error committing tx to insert validators for a dashboard: %w", err)
}
- result := []t.VDBPostValidatorsData{}
for _, validator := range validators {
result = append(result, t.VDBPostValidatorsData{
- PublicKey: pubkeysMap[validator],
- GroupId: addedValidatorsMap[validator],
+ Index: validator,
+ GroupId: groupId,
})
}
@@ -922,131 +906,151 @@ func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context,
// Updates the group for validators already in the dashboard linked to the deposit address.
// Adds up to limit new validators associated with the deposit address, if not already in the dashboard.
func (d *DataAccessService) AddValidatorDashboardValidatorsByDepositAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) {
+ result := []t.VDBPostValidatorsData{}
+
addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x"))
if err != nil {
return nil, err
}
- g, gCtx := errgroup.WithContext(ctx)
-
- // fetch validators that are already in the dashboard and associated with the deposit address
- var validatorIndicesToUpdate []uint64
-
- g.Go(func() error {
- return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, `
- SELECT DISTINCT uvdv.validator_index
- FROM validators v
- JOIN eth1_deposits d ON v.pubkey = d.publickey
- JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index
- WHERE uvdv.dashboard_id = $1 AND d.from_address = $2;
- `, dashboardId, addressParsed)
- })
-
- // fetch validators that are not yet in the dashboard and associated with the deposit address, up to the limit
- var validatorIndicesToInsert []uint64
- g.Go(func() error {
- return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, `
- SELECT DISTINCT v.validatorindex
- FROM validators v
- JOIN eth1_deposits d ON v.pubkey = d.publickey
- LEFT JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1
- WHERE d.from_address = $2 AND uvdv.validator_index IS NULL
- ORDER BY v.validatorindex
- LIMIT $3;
- `, dashboardId, addressParsed, limit)
- })
-
- err = g.Wait()
+ uniqueValidatorIndexesQuery := `
+ (SELECT
+ DISTINCT uvdv.validator_index
+ FROM validators v
+ JOIN eth1_deposits d ON v.pubkey = d.publickey
+ JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index
+ WHERE uvdv.dashboard_id = $1 AND d.from_address = $2)
+
+ UNION
+
+ (SELECT
+ DISTINCT v.validatorindex AS validator_index
+ FROM validators v
+ JOIN eth1_deposits d ON v.pubkey = d.publickey
+ LEFT JOIN users_val_dashboards_validators uvdv
+ ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1
+ WHERE d.from_address = $2 AND uvdv.validator_index IS NULL
+ ORDER BY validator_index
+ LIMIT $3)`
+
+ addValidatorsQuery := d.getAddValidatorsQuery(uniqueValidatorIndexesQuery)
+
+ var validators []uint64
+ err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, dashboardId, addressParsed, limit, groupId)
if err != nil {
return nil, err
}
- validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert)
+ for _, validator := range validators {
+ result = append(result, t.VDBPostValidatorsData{
+ Index: validator,
+ GroupId: groupId,
+ })
+ }
- return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices)
+ return result, nil
}
// Updates the group for validators already in the dashboard linked to the withdrawal address.
// Adds up to limit new validators associated with the withdrawal address, if not already in the dashboard.
func (d *DataAccessService) AddValidatorDashboardValidatorsByWithdrawalAddress(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, address string, limit uint64) ([]t.VDBPostValidatorsData, error) {
+ result := []t.VDBPostValidatorsData{}
+
addressParsed, err := hex.DecodeString(strings.TrimPrefix(address, "0x"))
if err != nil {
return nil, err
}
- g, gCtx := errgroup.WithContext(ctx)
-
- // fetch validators that are already in the dashboard and associated with the withdrawal address
- var validatorIndicesToUpdate []uint64
- g.Go(func() error {
- return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, `
- SELECT DISTINCT uvdv.validator_index
+ uniqueValidatorIndexesQuery := `
+ (SELECT
+ DISTINCT uvdv.validator_index
FROM validators v
JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index
- WHERE uvdv.dashboard_id = $1 AND v.withdrawalcredentials = $2;
- `, dashboardId, addressParsed)
- })
+ WHERE uvdv.dashboard_id = $1 AND v.withdrawalcredentials = $2)
+
+ UNION
- // fetch validators that are not yet in the dashboard and associated with the withdrawal address, up to the limit
- var validatorIndicesToInsert []uint64
- g.Go(func() error {
- return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, `
- SELECT DISTINCT v.validatorindex
+ (SELECT
+ DISTINCT v.validatorindex AS validator_index
FROM validators v
- LEFT JOIN users_val_dashboards_validators uvdv ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1
+ LEFT JOIN users_val_dashboards_validators uvdv
+ ON v.validatorindex = uvdv.validator_index AND uvdv.dashboard_id = $1
WHERE v.withdrawalcredentials = $2 AND uvdv.validator_index IS NULL
ORDER BY v.validatorindex
- LIMIT $3;
- `, dashboardId, addressParsed, limit)
- })
+ LIMIT $3)`
+
+ addValidatorsQuery := d.getAddValidatorsQuery(uniqueValidatorIndexesQuery)
- err = g.Wait()
+ var validators []uint64
+ err = d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, dashboardId, addressParsed, limit, groupId)
if err != nil {
return nil, err
}
- validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert)
+ for _, validator := range validators {
+ result = append(result, t.VDBPostValidatorsData{
+ Index: validator,
+ GroupId: groupId,
+ })
+ }
- return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices)
+ return result, nil
}
// Update the group for validators already in the dashboard linked to the graffiti (via produced block).
// Add up to limit new validators associated with the graffiti, if not already in the dashboard.
func (d *DataAccessService) AddValidatorDashboardValidatorsByGraffiti(ctx context.Context, dashboardId t.VDBIdPrimary, groupId uint64, graffiti string, limit uint64) ([]t.VDBPostValidatorsData, error) {
- g, gCtx := errgroup.WithContext(ctx)
+ result := []t.VDBPostValidatorsData{}
- // fetch validators that are already in the dashboard and associated with the graffiti
- var validatorIndicesToUpdate []uint64
- g.Go(func() error {
- return d.readerDb.SelectContext(gCtx, &validatorIndicesToUpdate, `
- SELECT DISTINCT uvdv.validator_index
+ uniqueValidatorIndexesQuery := `
+ (SELECT
+ DISTINCT uvdv.validator_index
FROM blocks b
JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index
- WHERE uvdv.dashboard_id = $1 AND b.graffiti_text = $2;
- `, dashboardId, graffiti)
- })
+ WHERE uvdv.dashboard_id = $1 AND b.graffiti_text = $2)
- // fetch validators that are not yet in the dashboard and associated with the graffiti, up to the limit
- var validatorIndicesToInsert []uint64
- g.Go(func() error {
- return d.readerDb.SelectContext(gCtx, &validatorIndicesToInsert, `
- SELECT DISTINCT b.proposer
+ UNION
+
+ (SELECT DISTINCT b.proposer AS validator_index
FROM blocks b
- LEFT JOIN users_val_dashboards_validators uvdv ON b.proposer = uvdv.validator_index AND uvdv.dashboard_id = $1
+ LEFT JOIN users_val_dashboards_validators uvdv
+ ON b.proposer = uvdv.validator_index AND uvdv.dashboard_id = $1
WHERE b.graffiti_text = $2 AND uvdv.validator_index IS NULL
ORDER BY b.proposer
- LIMIT $3;
- `, dashboardId, graffiti, limit)
- })
+ LIMIT $3)`
+
+ addValidatorsQuery := d.getAddValidatorsQuery(uniqueValidatorIndexesQuery)
- err := g.Wait()
+ var validators []uint64
+ err := d.alloyWriter.SelectContext(ctx, &validators, addValidatorsQuery, dashboardId, graffiti, limit, groupId)
if err != nil {
return nil, err
}
- validatorIndices := slices.Concat(validatorIndicesToUpdate, validatorIndicesToInsert)
+ for _, validator := range validators {
+ result = append(result, t.VDBPostValidatorsData{
+ Index: validator,
+ GroupId: groupId,
+ })
+ }
- return d.AddValidatorDashboardValidators(ctx, dashboardId, groupId, validatorIndices)
+ return result, nil
+}
+
+func (d *DataAccessService) getAddValidatorsQuery(uniqueValidatorIndexesQuery string) string {
+ return fmt.Sprintf(`
+ WITH unique_validator_indexes AS (
+ %s
+ )
+ INSERT INTO users_val_dashboards_validators (dashboard_id, group_id, validator_index)
+ SELECT $1 AS dashboard_id, $4 AS group_id, validator_index
+ FROM unique_validator_indexes
+ ON CONFLICT (dashboard_id, validator_index) DO UPDATE
+ SET
+ dashboard_id = EXCLUDED.dashboard_id,
+ group_id = EXCLUDED.group_id,
+ validator_index = EXCLUDED.validator_index
+ RETURNING validator_index`, uniqueValidatorIndexesQuery)
}
func (d *DataAccessService) RemoveValidatorDashboardValidators(ctx context.Context, dashboardId t.VDBIdPrimary, validators []t.VDBValidator) error {
diff --git a/backend/pkg/api/types/validator_dashboard.go b/backend/pkg/api/types/validator_dashboard.go
index 0b74eb8fd..db3886072 100644
--- a/backend/pkg/api/types/validator_dashboard.go
+++ b/backend/pkg/api/types/validator_dashboard.go
@@ -366,6 +366,6 @@ type VDBPostCreateGroupData struct {
}
type VDBPostValidatorsData struct {
- PublicKey string `json:"public_key"`
- GroupId uint64 `json:"group_id"`
+ Index uint64 `json:"index"`
+ GroupId uint64 `json:"group_id"`
}
diff --git a/frontend/types/api/validator_dashboard.ts b/frontend/types/api/validator_dashboard.ts
index c426f0aea..e85eac671 100644
--- a/frontend/types/api/validator_dashboard.ts
+++ b/frontend/types/api/validator_dashboard.ts
@@ -325,6 +325,6 @@ export interface VDBPostCreateGroupData {
name: string;
}
export interface VDBPostValidatorsData {
- public_key: string;
+ index: number /* uint64 */;
group_id: number /* uint64 */;
}
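The new helper composes a single INSERT ... SELECT over a caller-supplied sub-select, with a fixed placeholder contract: $1 is the dashboard id, $2 the address/graffiti filter, $3 the limit, and $4 the group id added by the wrapper. A sketch of how the query is assembled; the sub-select below is a simplified stand-in, not one of the real queries:

    package main

    import "fmt"

    // buildAddValidatorsQuery mirrors the helper in the patch: the sub-select may
    // use $1, $2 and $3, and the wrapper contributes $4 for the group id.
    func buildAddValidatorsQuery(uniqueValidatorIndexesQuery string) string {
        return fmt.Sprintf(`
        WITH unique_validator_indexes AS (
            %s
        )
        INSERT INTO users_val_dashboards_validators (dashboard_id, group_id, validator_index)
        SELECT $1 AS dashboard_id, $4 AS group_id, validator_index
        FROM unique_validator_indexes
        ON CONFLICT (dashboard_id, validator_index) DO UPDATE
            SET group_id = EXCLUDED.group_id
        RETURNING validator_index`, uniqueValidatorIndexesQuery)
    }

    func main() {
        // Simplified stand-in for one of the three sub-selects.
        sub := `SELECT DISTINCT v.validatorindex AS validator_index
        FROM validators v
        WHERE v.withdrawalcredentials = $2
        LIMIT $3`
        fmt.Println(buildAddValidatorsQuery(sub))
        // Executed roughly as:
        // db.SelectContext(ctx, &indexes, query, dashboardId, address, limit, groupId)
    }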
From 6baf44000c12053ac73eefb04a2218e0338edcea Mon Sep 17 00:00:00 2001
From: Stefan Pletka <124689083+Eisei24@users.noreply.github.com>
Date: Thu, 14 Nov 2024 10:44:33 +0100
Subject: [PATCH 5/6] fix(BEDS-880): Use the correct writer
---
backend/pkg/api/data_access/vdb_management.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/backend/pkg/api/data_access/vdb_management.go b/backend/pkg/api/data_access/vdb_management.go
index e7c1018ce..6b1e6304b 100644
--- a/backend/pkg/api/data_access/vdb_management.go
+++ b/backend/pkg/api/data_access/vdb_management.go
@@ -842,7 +842,7 @@ func (d *DataAccessService) AddValidatorDashboardValidators(ctx context.Context,
return nil, nil
}
- tx, err := d.userWriter.BeginTxx(ctx, nil)
+ tx, err := d.writerDb.BeginTxx(ctx, nil)
if err != nil {
return nil, fmt.Errorf("error starting db transactions to insert validators for a dashboard: %w", err)
}
From f450f755f8aec3fb671c4c43a782ef5a1d154c24 Mon Sep 17 00:00:00 2001
From: peter <1674920+peterbitfly@users.noreply.github.com>
Date: Fri, 15 Nov 2024 07:56:43 +0000
Subject: [PATCH 6/6] fix(notifications): use correct col name in webhook
update query
---
backend/pkg/notification/sending.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/backend/pkg/notification/sending.go b/backend/pkg/notification/sending.go
index 2efc68a99..f96162234 100644
--- a/backend/pkg/notification/sending.go
+++ b/backend/pkg/notification/sending.go
@@ -330,7 +330,7 @@ func sendWebhookNotifications() error {
if n.Content.Webhook.DashboardId == 0 && n.Content.Webhook.DashboardGroupId == 0 {
_, err = db.FrontendWriterDB.Exec(`UPDATE users_webhooks SET retries = retries + 1, last_sent = now(), request = $2, response = $3 WHERE id = $1;`, n.Content.Webhook.ID, n.Content, errResp)
} else {
- _, err = db.WriterDb.Exec(`UPDATE users_val_dashboards_groups SET webhook_retries = retries + 1, webhook_last_sent = now() WHERE id = $1 AND dashboard_id = $2;`, n.Content.Webhook.DashboardGroupId, n.Content.Webhook.DashboardId)
+ _, err = db.WriterDb.Exec(`UPDATE users_val_dashboards_groups SET webhook_retries = webhook_retries + 1, webhook_last_sent = now() WHERE id = $1 AND dashboard_id = $2;`, n.Content.Webhook.DashboardGroupId, n.Content.Webhook.DashboardId)
}
if err != nil {
log.Error(err, "error updating users_webhooks table", 0)