feat(tests): init caches on unit test db
yungbender authored and jdobes committed Dec 16, 2022
1 parent 0d3fcf7 commit 92a6803
Showing 5 changed files with 51 additions and 13 deletions.
2 changes: 1 addition & 1 deletion digestwriter/consumer.go
@@ -88,7 +88,7 @@ type DigestConsumer struct {
 // NewConsumer constructs a new instance of Consumer interface
 // specialized in consuming from SHA extractor's result topic
 func NewConsumer(storage Storage) (*utils.KafkaConsumer, error) {
-    setupLogger()
+    SetupLogger()
     processor := DigestConsumer{
         storage,
         0,
4 changes: 2 additions & 2 deletions digestwriter/digestwriter.go
@@ -23,7 +23,7 @@ const (
     ExitStatusConsumerError
 )

-func setupLogger() {
+func SetupLogger() {
     if logger == nil {
         var err error
         logger, err = utils.CreateLogger(utils.Cfg.LoggingLevel)
@@ -49,7 +49,7 @@ func startConsumer(storage Storage) (*utils.KafkaConsumer, error) {

 // Start function tries to start the digest writer service.
 func Start() {
-    setupLogger()
+    SetupLogger()
     logger.Infoln("Initializing digest writer...")

     RunMetrics()
1 change: 0 additions & 1 deletion digestwriter/export_test.go
@@ -16,7 +16,6 @@ var (
     // functions from consumer.go source file
     ExtractDigestsFromMessage = extractDigestsFromMessage
     ParseMessage              = parseMessage
-    SetupLogger               = setupLogger
 )

 // kafka-related functions
18 changes: 10 additions & 8 deletions digestwriter/storage.go
@@ -85,7 +85,7 @@ func prepareClusterImageLists(clusterID int64, currentImageIDs map[int64]struct{
 }

 // updateClusterCache updates the cache section of cluster row in db
-func (storage *DBStorage) updateClusterCache(tx *gorm.DB, clusterID int64, existingDigests []models.Image) error {
+func (storage *DBStorage) UpdateClusterCache(tx *gorm.DB, clusterID int64, existingDigests []models.Image) error {
     digestIDs := make([]int64, 0, len(existingDigests))
     for _, digest := range existingDigests {
         digestIDs = append(digestIDs, digest.ID)
@@ -177,13 +177,15 @@ func (storage *DBStorage) linkDigestsToCluster(tx *gorm.DB, clusterStr string, c
         }
     }

-    err := storage.updateClusterCache(tx, clusterID, existingDigests)
-    if err != nil {
-        logger.WithFields(logrus.Fields{
-            errorKey:     err.Error(),
-            clusterIDKey: clusterID,
-        }).Errorln("couldn't update cluster cve cache")
-        return err
+    if len(toInsert) > 0 || len(toDelete) > 0 {
+        err := storage.UpdateClusterCache(tx, clusterID, existingDigests)
+        if err != nil {
+            logger.WithFields(logrus.Fields{
+                errorKey:     err.Error(),
+                clusterIDKey: clusterID,
+            }).Errorln("couldn't update cluster cve cache")
+            return err
+        }
     }

     logger.Debugln("linked digests to cluster successfully")
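The guard added above means the cluster's cached CVE data is rewritten only when the set of linked digests actually changed. A minimal sketch of that idea in isolation — the helper and its parameters are hypothetical and not part of the digestwriter package:

package example

// updateIfChanged mirrors the pattern introduced in linkDigestsToCluster:
// recompute the cache only when links were inserted or deleted, so no-op
// calls leave the existing cache row untouched.
func updateIfChanged(inserted, deleted int, refresh func() error) error {
    if inserted == 0 && deleted == 0 {
        return nil // nothing changed; the cached values are still valid
    }
    return refresh()
}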
39 changes: 38 additions & 1 deletion test/setup.go
@@ -1,6 +1,8 @@
 package test

 import (
+    "app/base/models"
+    "app/digestwriter"
     "fmt"
     "os"
     "path/filepath"
@@ -69,6 +71,38 @@ func ReverseWalkFindFile(filename string) (string, error) {
     return datapath, nil
 }

+func PopulateClusterCveCache(DB *gorm.DB) error {
+    digestwriter.SetupLogger()
+    storage, err := digestwriter.NewStorage()
+    if err != nil {
+        return err
+    }
+
+    clusters := []models.Cluster{}
+    if res := DB.Find(&clusters); res.Error != nil {
+        return res.Error
+    }
+
+    for _, cluster := range clusters {
+        clusterDigests := []models.Image{}
+        subq := DB.Table("cluster_image").
+            Joins("JOIN image ON cluster_image.image_id = image.id").
+            Where("cluster_image.cluster_id = ?", cluster.ID)
+        if res := DB.Joins("JOIN (?) AS cluster_image ON image.id = cluster_image.image_id", subq).
+            Find(&clusterDigests); res.Error != nil {
+            return res.Error
+        }
+        if err := storage.UpdateClusterCache(DB, cluster.ID, clusterDigests); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+func PopulateCaches(DB *gorm.DB) error {
+    return PopulateClusterCveCache(DB)
+}
+
 func ResetDB() error {
     if testingDataPath == "" {
         var err error
@@ -94,5 +128,8 @@ func ResetDB() error {
         return err
     }
     _, err = plainDb.Exec(string(buf))
-    return err
+    if err != nil {
+        return err
+    }
+    return PopulateCaches(DB)
 }
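A minimal sketch of how a unit test might use the updated helper, assuming the test package above is importable as app/test; the import path, package name, and test name are illustrative only:

package digestwriter_test

import (
    "testing"

    "app/test" // assumed import path, based on the app/... paths used above
)

// TestWithPopulatedCaches illustrates the intended flow: ResetDB now calls
// PopulateCaches after loading the test data, so the cluster CVE caches are
// initialized before any assertions run.
func TestWithPopulatedCaches(t *testing.T) {
    if err := test.ResetDB(); err != nil {
        t.Fatalf("failed to reset unit test DB: %v", err)
    }
    // ... exercise code that reads the per-cluster cache here ...
}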
