extract cachestats to record cache stats #924

Merged · 2 commits · Dec 17, 2024
8 changes: 4 additions & 4 deletions chain/cache.go
@@ -18,14 +18,14 @@ func newCache(maxSize int) *cache {
return &cache{c}
}

func (c *cache) GetOrLoad(key interface{}, load func() (interface{}, error)) (interface{}, error) {
func (c *cache) GetOrLoad(key interface{}, load func() (interface{}, error)) (interface{}, bool, error) {
if value, ok := c.Get(key); ok {
return value, nil
return value, true, nil
}
value, err := load()
if err != nil {
return nil, err
return nil, false, err
}
c.Add(key, value)
return value, nil
return value, false, nil
}
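For orientation, here is a minimal, self-contained sketch (a toy map-based cache with made-up key and value names, not the repository's actual types) of how a caller can use the new `cached` flag returned by `GetOrLoad` to tell hits apart from misses:

```go
package main

import "fmt"

// toy in-memory cache; the real implementation wraps an LRU cache,
// this map is only for illustration.
type cache struct {
	m map[string]interface{}
}

// GetOrLoad mirrors the new signature: it also reports whether the
// value was served from the cache (true) or freshly loaded (false).
func (c *cache) GetOrLoad(key string, load func() (interface{}, error)) (interface{}, bool, error) {
	if v, ok := c.m[key]; ok {
		return v, true, nil
	}
	v, err := load()
	if err != nil {
		return nil, false, err
	}
	c.m[key] = v
	return v, false, nil
}

func main() {
	c := &cache{m: map[string]interface{}{}}
	for i := 0; i < 2; i++ {
		v, cached, err := c.GetOrLoad("block-1", func() (interface{}, error) {
			return "summary-of-block-1", nil // stands in for a real loader such as loadBlockSummary
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(v, "cached:", cached) // first call: false, second call: true
	}
}
```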
2 changes: 1 addition & 1 deletion chain/metric.go
@@ -8,5 +8,5 @@ package chain
import "github.com/vechain/thor/v2/metrics"

var (
metricCacheHitMiss = metrics.LazyLoadCounterVec("repo_cache_hit_miss_count", []string{"type", "event"})
metricCacheHitMiss = metrics.LazyLoadGaugeVec("repo_cache_hit_miss_count", []string{"type", "event"})
)
59 changes: 43 additions & 16 deletions chain/repository.go
Expand Up @@ -52,6 +52,12 @@ type Repository struct {
summaries *cache
txs *cache
receipts *cache

stats struct {
summaries thor.CacheStats
txs thor.CacheStats
receipts thor.CacheStats
}
}
}

@@ -321,29 +327,44 @@ func (r *Repository) GetMaxBlockNum() (uint32, error) {
}

// GetBlockSummary get block summary by block id.
func (r *Repository) GetBlockSummary(id thor.Bytes32) (summary *BlockSummary, err error) {
var blk interface{}
result := "hit"
if blk, err = r.caches.summaries.GetOrLoad(id, func() (interface{}, error) {
result = "miss"
func (r *Repository) GetBlockSummary(id thor.Bytes32) (*BlockSummary, error) {
blk, cached, err := r.caches.summaries.GetOrLoad(id, func() (interface{}, error) {
return loadBlockSummary(r.data, id)
}); err != nil {
return
})
if err != nil {
return nil, err
}

if cached {
if r.caches.stats.summaries.Hit()%2000 == 0 {
_, hit, miss := r.caches.stats.summaries.Stats()
metricCacheHitMiss().SetWithLabel(hit, map[string]string{"type": "blocks", "event": "hit"})
metricCacheHitMiss().SetWithLabel(miss, map[string]string{"type": "blocks", "event": "miss"})
}
} else {
r.caches.stats.summaries.Miss()
}
metricCacheHitMiss().AddWithLabel(1, map[string]string{"type": "blocks", "event": result})

return blk.(*BlockSummary), nil
}

func (r *Repository) getTransaction(key txKey) (*tx.Transaction, error) {
result := "hit"
trx, err := r.caches.txs.GetOrLoad(key, func() (interface{}, error) {
result = "miss"
trx, cached, err := r.caches.txs.GetOrLoad(key, func() (interface{}, error) {
return loadTransaction(r.data, key)
})
if err != nil {
return nil, err
}
metricCacheHitMiss().AddWithLabel(1, map[string]string{"type": "transaction", "event": result})

if cached {
if r.caches.stats.txs.Hit()%2000 == 0 {
_, hit, miss := r.caches.stats.txs.Stats()
metricCacheHitMiss().SetWithLabel(hit, map[string]string{"type": "transaction", "event": "hit"})
metricCacheHitMiss().SetWithLabel(miss, map[string]string{"type": "transaction", "event": "miss"})
}
} else {
r.caches.stats.txs.Miss()
}
return trx.(*tx.Transaction), nil
}

@@ -383,15 +404,21 @@ func (r *Repository) GetBlock(id thor.Bytes32) (*block.Block, error) {
}

func (r *Repository) getReceipt(key txKey) (*tx.Receipt, error) {
result := "hit"
receipt, err := r.caches.receipts.GetOrLoad(key, func() (interface{}, error) {
result = "miss"
receipt, cached, err := r.caches.receipts.GetOrLoad(key, func() (interface{}, error) {
return loadReceipt(r.data, key)
})
if err != nil {
return nil, err
}
metricCacheHitMiss().AddWithLabel(1, map[string]string{"type": "receipt", "event": result})
if cached {
if r.caches.stats.receipts.Hit()%2000 == 0 {
_, hit, miss := r.caches.stats.receipts.Stats()
metricCacheHitMiss().SetWithLabel(hit, map[string]string{"type": "receipt", "event": "hit"})
metricCacheHitMiss().SetWithLabel(miss, map[string]string{"type": "receipt", "event": "miss"})
}
} else {
r.caches.stats.receipts.Miss()
}

return receipt.(*tx.Receipt), nil
}
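The pattern repeated in these accessors throttles metric traffic: misses are always counted, but gauges are only pushed on every 2000th hit, keeping the hot read path cheap. A self-contained sketch of the same idea, with a simplified counter struct and a print statement standing in for `thor.CacheStats` and the metrics gauge:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// simplified stand-in for thor.CacheStats.
type stats struct{ hit, miss atomic.Int64 }

// stand-in for a metrics gauge update.
func report(label string, v int64) { fmt.Printf("gauge %s=%d\n", label, v) }

func recordLookup(s *stats, cached bool) {
	if cached {
		// only every 2000th hit triggers a metrics update,
		// so the common case is a single atomic increment.
		if s.hit.Add(1)%2000 == 0 {
			report("hit", s.hit.Load())
			report("miss", s.miss.Load())
		}
	} else {
		s.miss.Add(1)
	}
}

func main() {
	var s stats
	for i := 0; i < 4001; i++ {
		recordLookup(&s, i%4 != 0) // roughly 75% hit rate, for illustration
	}
}
```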
67 changes: 28 additions & 39 deletions muxdb/internal/trie/cache.go
@@ -13,6 +13,7 @@ import (

lru "github.com/hashicorp/golang-lru"
"github.com/qianbin/directcache"
"github.com/vechain/thor/v2/thor"
"github.com/vechain/thor/v2/trie"
)

@@ -24,8 +25,8 @@ type Cache struct {
committedNodes *directcache.Cache
// caches root nodes.
roots *lru.ARCCache
nodeStats cacheStats
rootStats cacheStats
nodeStats thor.CacheStats
rootStats thor.CacheStats
lastLogTime int64
}

@@ -45,17 +46,21 @@ func (c *Cache) log() {
last := atomic.SwapInt64(&c.lastLogTime, now)

if now-last > int64(time.Second*20) {
logNode, hitNode, missNode, okNode := c.nodeStats.shouldLog("node cache stats")
logRoot, hitRoot, missRoot, okRoot := c.rootStats.shouldLog("root cache stats")

if okNode || okRoot {
logNode()
metricCacheHitMissGaugeVec().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"})
metricCacheHitMissGaugeVec().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"})
logRoot()
metricCacheHitMissGaugeVec().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"})
metricCacheHitMissGaugeVec().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"})
shouldNode, hitNode, missNode := c.nodeStats.Stats()
shouldRoot, hitRoot, missRoot := c.rootStats.Stats()

// log the two categories together only if one of the hit rates has
// changed since the last run, to avoid too many logs.
if shouldNode || shouldRoot {
logStats("node cache stats", hitNode, missNode)
logStats("root cache stats", hitRoot, missRoot)
}

// metrics are reported every 20 seconds
metricCacheHitMiss().SetWithLabel(hitNode, map[string]string{"type": "node", "event": "hit"})
metricCacheHitMiss().SetWithLabel(missNode, map[string]string{"type": "node", "event": "miss"})
metricCacheHitMiss().SetWithLabel(hitRoot, map[string]string{"type": "root", "event": "hit"})
metricCacheHitMiss().SetWithLabel(missRoot, map[string]string{"type": "root", "event": "miss"})
} else {
atomic.CompareAndSwapInt64(&c.lastLogTime, now, last)
}
@@ -185,33 +190,17 @@ func (c *Cache) GetRootNode(name string, seq uint64, peek bool) (trie.Node, bool
return trie.Node{}, false
}

type cacheStats struct {
hit, miss int64
flag int32
}

func (cs *cacheStats) Hit() int64 { return atomic.AddInt64(&cs.hit, 1) }
func (cs *cacheStats) Miss() int64 { return atomic.AddInt64(&cs.miss, 1) }

func (cs *cacheStats) shouldLog(msg string) (func(), int64, int64, bool) {
hit := atomic.LoadInt64(&cs.hit)
miss := atomic.LoadInt64(&cs.miss)
func logStats(msg string, hit, miss int64) {
lookups := hit + miss
var str string
if lookups > 0 {
str = fmt.Sprintf("%.3f", float64(hit)/float64(lookups))
} else {
str = "n/a"
}

hitrate := float64(hit) / float64(lookups)
flag := int32(hitrate * 1000)
return func() {
var str string
if lookups > 0 {
str = fmt.Sprintf("%.3f", hitrate)
} else {
str = "n/a"
}

logger.Info(msg,
"lookups", lookups,
"hitrate", str,
)
atomic.StoreInt32(&cs.flag, flag)
}, hit, miss, atomic.LoadInt32(&cs.flag) != flag
logger.Info(msg,
"lookups", lookups,
"hitrate", str,
)
}
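`logStats` now receives the raw counters directly and formats the hit rate, printing "n/a" when there were no lookups. A standalone sketch of that formatting, with `fmt.Println` standing in for the package's structured logger:

```go
package main

import "fmt"

// logStats mirrors the helper above, printing instead of using the
// package's structured logger.
func logStats(msg string, hit, miss int64) {
	lookups := hit + miss
	str := "n/a"
	if lookups > 0 {
		str = fmt.Sprintf("%.3f", float64(hit)/float64(lookups))
	}
	fmt.Println(msg, "lookups", lookups, "hitrate", str)
}

func main() {
	logStats("node cache stats", 950, 50) // hitrate 0.950
	logStats("root cache stats", 0, 0)    // hitrate n/a
}
```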
2 changes: 1 addition & 1 deletion muxdb/internal/trie/metrics.go
@@ -9,4 +9,4 @@ import (
"github.com/vechain/thor/v2/metrics"
)

var metricCacheHitMissGaugeVec = metrics.LazyLoadGaugeVec("cache_hit_miss_count", []string{"type", "event"})
var metricCacheHitMiss = metrics.LazyLoadGaugeVec("cache_hit_miss_count", []string{"type", "event"})
64 changes: 38 additions & 26 deletions runtime/runtime.go
@@ -6,6 +6,7 @@
package runtime

import (
"fmt"
"math/big"
"sync/atomic"

@@ -249,50 +250,54 @@ func (rt *Runtime) newEVM(stateDB *statedb.StateDB, clauseIndex uint32, txCtx *x
},
OnSuicideContract: func(_ *vm.EVM, contractAddr, tokenReceiver common.Address) {
// it's IMPORTANT to process energy before token
amount, err := rt.state.GetEnergy(thor.Address(contractAddr), rt.ctx.Time)
energy, err := rt.state.GetEnergy(thor.Address(contractAddr), rt.ctx.Time)
if err != nil {
panic(err)
}
if amount.Sign() != 0 {
// add remained energy of suiciding contract to receiver.
// no need to clear contract's energy, vm will delete the whole contract later.
bal := stateDB.GetBalance(contractAddr)

if bal.Sign() != 0 || energy.Sign() != 0 {
receiverEnergy, err := rt.state.GetEnergy(thor.Address(tokenReceiver), rt.ctx.Time)
if err != nil {
panic(err)
}
// touch the receiver's energy
// no need to clear contract's energy, vm will delete the whole contract later.
if err := rt.state.SetEnergy(
thor.Address(tokenReceiver),
new(big.Int).Add(receiverEnergy, amount),
new(big.Int).Add(receiverEnergy, energy),
rt.ctx.Time); err != nil {
panic(err)
}

// see ERC20's Transfer event
topics := []common.Hash{
common.Hash(energyTransferEvent.ID()),
common.BytesToHash(contractAddr[:]),
common.BytesToHash(tokenReceiver[:]),
// emit event if there is energy in the account
if energy.Sign() != 0 {
// see ERC20's Transfer event
topics := []common.Hash{
common.Hash(energyTransferEvent.ID()),
common.BytesToHash(contractAddr[:]),
common.BytesToHash(tokenReceiver[:]),
}

data, err := energyTransferEvent.Encode(energy)
if err != nil {
panic(err)
}

stateDB.AddLog(&types.Log{
Address: common.Address(builtin.Energy.Address),
Topics: topics,
Data: data,
})
}

data, err := energyTransferEvent.Encode(amount)
if err != nil {
panic(err)
}

stateDB.AddLog(&types.Log{
Address: common.Address(builtin.Energy.Address),
Topics: topics,
Data: data,
})
}

if amount := stateDB.GetBalance(contractAddr); amount.Sign() != 0 {
stateDB.AddBalance(tokenReceiver, amount)
if bal.Sign() != 0 {
stateDB.AddBalance(tokenReceiver, bal)

stateDB.AddTransfer(&tx.Transfer{
Sender: thor.Address(contractAddr),
Recipient: thor.Address(tokenReceiver),
Amount: amount,
Amount: bal,
})
}
},
@@ -328,7 +333,14 @@ func (rt *Runtime) PrepareClause(
defer func() {
if e := recover(); e != nil {
// caught state error
err = e.(error)
switch e := e.(type) {
case error:
err = e
case string:
err = errors.New(e)
default:
err = fmt.Errorf("runtime: unknown error: %v", e)
}
}
}()

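The updated recover block converts any panic value into an error rather than assuming it is already an `error`. A minimal sketch of that pattern in isolation (the `do` helper and its messages are illustrative, not code from the repository):

```go
package main

import (
	"errors"
	"fmt"
)

// do runs fn and converts any panic into a returned error, whether the
// panic value is an error, a string, or something else entirely.
func do(fn func()) (err error) {
	defer func() {
		if e := recover(); e != nil {
			switch e := e.(type) {
			case error:
				err = e
			case string:
				err = errors.New(e)
			default:
				err = fmt.Errorf("unknown panic: %v", e)
			}
		}
	}()
	fn()
	return nil
}

func main() {
	fmt.Println(do(func() { panic("state error") }))           // string panic
	fmt.Println(do(func() { panic(errors.New("bad block")) })) // error panic
	fmt.Println(do(func() { panic(42) }))                      // anything else
}
```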
35 changes: 35 additions & 0 deletions thor/cachestats.go
@@ -0,0 +1,35 @@
// Copyright (c) 2024 The VeChainThor developers

// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html>
package thor

import "sync/atomic"

// CacheStats is a utility for collecting cache hit/miss.
type CacheStats struct {
[review comment] Member: Should we put this in metrics dir?
[review reply] Member: Captured in a ticket 👍
hit, miss atomic.Int64
flag atomic.Int32
}

// Hit records a hit.
func (cs *CacheStats) Hit() int64 { return cs.hit.Add(1) }

// Miss records a miss.
func (cs *CacheStats) Miss() int64 { return cs.miss.Add(1) }

// Stats returns whether the hit rate has changed since the last call,
// along with the number of hits and misses.
func (cs *CacheStats) Stats() (bool, int64, int64) {
hit := cs.hit.Load()
miss := cs.miss.Load()
lookups := hit + miss

hitRate := float64(0)
if lookups > 0 {
hitRate = float64(hit) / float64(lookups)
}
flag := int32(hitRate * 1000)

return cs.flag.Swap(flag) != flag, hit, miss
}
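The `flag` field stores the hit rate quantized to 0.1% steps, and `Stats` swaps in the new value so it reports `true` only when that quantized rate has moved, which is what gates the periodic log output. A small usage sketch, assuming the `thor` package import path used elsewhere in this PR:

```go
package main

import (
	"fmt"

	"github.com/vechain/thor/v2/thor"
)

func main() {
	var cs thor.CacheStats

	// record 1000 hits and 1 miss.
	for i := 0; i < 1000; i++ {
		cs.Hit()
	}
	cs.Miss()

	// Stats reports whether the quantized hit rate (0.1% steps) moved
	// since the previous call, plus the raw hit/miss counters.
	changed, hit, miss := cs.Stats()
	fmt.Println(changed, hit, miss) // true 1000 1

	// calling again without new lookups: the rate is unchanged.
	changed, _, _ = cs.Stats()
	fmt.Println(changed) // false
}
```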
35 changes: 35 additions & 0 deletions thor/cachestats_test.go
@@ -0,0 +1,35 @@
// Copyright (c) 2024 The VeChainThor developers

// Distributed under the GNU Lesser General Public License v3.0 software license, see the accompanying
// file LICENSE or <https://www.gnu.org/licenses/lgpl-3.0.html>

package thor

import (
"testing"

"github.com/stretchr/testify/assert"
)

func TestCacheStats(t *testing.T) {
cs := &CacheStats{}
cs.Hit()
cs.Miss()
_, hit, miss := cs.Stats()

assert.Equal(t, int64(1), hit)
assert.Equal(t, int64(1), miss)

changed, _, _ := cs.Stats()
assert.False(t, changed)

cs.Hit()
cs.Miss()
assert.Equal(t, int64(3), cs.Hit())

changed, hit, miss = cs.Stats()

assert.Equal(t, int64(3), hit)
assert.Equal(t, int64(2), miss)
assert.True(t, changed)
}