Fix memory leak in interquery cache

Signed-off-by: Aleksander <[email protected]>
asleire authored and ashutosh-narkar committed Dec 21, 2022
1 parent a08934d commit 600899a
Showing 2 changed files with 40 additions and 9 deletions.
30 changes: 21 additions & 9 deletions topdown/cache/cache.go
@@ -75,15 +75,20 @@ type InterQueryCache interface {
 // NewInterQueryCache returns a new inter-query cache.
 func NewInterQueryCache(config *Config) InterQueryCache {
 	return &cache{
-		items:  map[string]InterQueryCacheValue{},
+		items:  map[string]cacheItem{},
 		usage:  0,
 		config: config,
 		l:      list.New(),
 	}
 }
 
+type cacheItem struct {
+	value      InterQueryCacheValue
+	keyElement *list.Element
+}
+
 type cache struct {
-	items  map[string]InterQueryCacheValue
+	items  map[string]cacheItem
 	usage  int64
 	config *Config
 	l      *list.List
@@ -101,7 +106,12 @@ func (c *cache) Insert(k ast.Value, v InterQueryCacheValue) (dropped int) {
 func (c *cache) Get(k ast.Value) (InterQueryCacheValue, bool) {
 	c.mtx.Lock()
 	defer c.mtx.Unlock()
-	return c.unsafeGet(k)
+	cacheItem, ok := c.unsafeGet(k)
+
+	if ok {
+		return cacheItem.value, true
+	}
+	return nil, false
 }
 
 // Delete deletes the value in the cache for k.
@@ -133,33 +143,35 @@ func (c *cache) unsafeInsert(k ast.Value, v InterQueryCacheValue) (dropped int)
 		for key := c.l.Front(); key != nil && (c.usage+size > limit); key = c.l.Front() {
 			dropKey := key.Value.(ast.Value)
 			c.unsafeDelete(dropKey)
-			c.l.Remove(key)
 			dropped++
 		}
 	}
 
+	// By deleting the old value, if it exists, we ensure the usage variable stays correct
+	c.unsafeDelete(k)
+
-	c.items[k.String()] = v
-	c.l.PushBack(k)
+	c.items[k.String()] = cacheItem{
+		value:      v,
+		keyElement: c.l.PushBack(k),
+	}
 	c.usage += size
 	return dropped
 }
 
-func (c *cache) unsafeGet(k ast.Value) (InterQueryCacheValue, bool) {
+func (c *cache) unsafeGet(k ast.Value) (cacheItem, bool) {
 	value, ok := c.items[k.String()]
 	return value, ok
 }
 
 func (c *cache) unsafeDelete(k ast.Value) {
-	value, ok := c.unsafeGet(k)
+	cacheItem, ok := c.unsafeGet(k)
 	if !ok {
 		return
 	}
 
-	c.usage -= value.SizeInBytes()
+	c.usage -= cacheItem.value.SizeInBytes()
 	delete(c.items, k.String())
+	c.l.Remove(cacheItem.keyElement)
 }
 
 func (c *cache) maxSizeBytes() int64 {
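For readers unfamiliar with the map-plus-list bookkeeping this change adopts, the standalone sketch below illustrates it under stated assumptions: miniCache, entry, insert, and remove are illustrative names, not taken from OPA. Each map entry keeps the *list.Element returned by PushBack, so removing a key drops it from the map and from the eviction list in one step, which is what keeps the list from outgrowing the map.

// Minimal sketch, standard library only. The miniCache, entry, insert, and
// remove names are illustrative and are not taken from the OPA code.
package main

import (
	"container/list"
	"fmt"
)

type entry struct {
	value      string
	keyElement *list.Element // back-reference into the eviction list
}

type miniCache struct {
	items map[string]entry
	l     *list.List
}

func newMiniCache() *miniCache {
	return &miniCache{items: map[string]entry{}, l: list.New()}
}

func (c *miniCache) remove(k string) {
	e, ok := c.items[k]
	if !ok {
		return
	}
	delete(c.items, k)
	// Removing the stored element keeps the list in lockstep with the map.
	c.l.Remove(e.keyElement)
}

func (c *miniCache) insert(k, v string) {
	c.remove(k) // drop any previous entry for k first, mirroring the fix
	c.items[k] = entry{value: v, keyElement: c.l.PushBack(k)}
}

func main() {
	c := newMiniCache()
	c.insert("foo", "bar")
	c.insert("foo", "baz") // overwrite the same key
	fmt.Println(len(c.items), c.l.Len()) // 1 1 -- map and list stay the same size
}

Overwriting "foo" twice leaves one map entry and one list element, which is exactly the invariant the new verifyCacheList test helper below checks.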
19 changes: 19 additions & 0 deletions topdown/cache/cache_test.go
@@ -140,16 +140,19 @@ func TestInsert(t *testing.T) {
 	if !found {
 		t.Fatal("Expected key \"foo5\" in cache")
 	}
+	verifyCacheList(t, cache)
 
 	// replacing an existing key should not affect cache size
 	cache = NewInterQueryCache(config)
 
 	cacheValue6 := newInterQueryCacheValue(ast.String("bar6"), 10)
 	cache.Insert(ast.String("foo6"), cacheValue6)
 	cache.Insert(ast.String("foo6"), cacheValue6)
+	verifyCacheList(t, cache)
 
 	cacheValue7 := newInterQueryCacheValue(ast.String("bar7"), 10)
 	dropped = cache.Insert(ast.StringTerm("foo7").Value, cacheValue7)
+	verifyCacheList(t, cache)
 
 	if dropped != 0 {
 		t.Fatal("Expected dropped to be zero")
@@ -186,6 +189,7 @@ func TestConcurrentInsert(t *testing.T) {
 
 	cacheValue3 := newInterQueryCacheValue(ast.String("bar3"), 5)
 	dropped := cache.Insert(ast.String("foo3"), cacheValue3)
+	verifyCacheList(t, cache)
 
 	if dropped != 0 {
 		t.Fatal("Expected dropped to be zero")
@@ -221,13 +225,15 @@ func TestDelete(t *testing.T) {
 	if dropped != 0 {
 		t.Fatal("Expected dropped to be zero")
 	}
+	verifyCacheList(t, cache)
 
 	cache.Delete(ast.StringTerm("foo").Value)
 
 	_, found := cache.Get(ast.StringTerm("foo").Value)
 	if found {
 		t.Fatal("Unexpected key \"foo\" in cache")
 	}
+	verifyCacheList(t, cache)
 }
 
 func TestUpdateConfig(t *testing.T) {
@@ -268,6 +274,19 @@ func TestDefaultMaxSizeBytes(t *testing.T) {
 	}
 }
 
+// Verifies that the size of c.l is identical to the size of c.items
+// Since the size of c.items is limited by c.usage, this helps us
+// avoid a situation where c.l can grow indefinitely causing a memory leak
+func verifyCacheList(t *testing.T, c InterQueryCache) {
+	actualC, ok := c.(*cache)
+	if !ok {
+		t.Fatal("Unexpected error converting InterQueryCache to cache struct")
+	}
+	if len(actualC.items) != actualC.l.Len() {
+		t.Fatal("actualC.l should contain equally many elements as actualC.items")
+	}
+}
+
 type testInterQueryCacheValue struct {
 	value ast.Value
 	size  int
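As a hedged companion to the verifyCacheList helper above, the standalone snippet below (illustrative only, not part of the OPA test suite) reproduces the pre-fix failure mode: pushing a key onto the list on every insert without removing the previous element lets the list keep growing while the map, and therefore the tracked usage, stays the same size.

// Sketch of the leak scenario this commit removes (illustrative names, not OPA code):
// overwriting a key grows the list but not the map when the old list element
// is never removed.
package main

import (
	"container/list"
	"fmt"
)

func main() {
	items := map[string]string{}
	l := list.New()

	insert := func(k, v string) {
		items[k] = v
		l.PushBack(k) // the element pushed for a previous insert of k is never removed
	}

	insert("foo", "bar")
	insert("foo", "baz") // same key again
	fmt.Println(len(items), l.Len()) // 1 2 -- the list has leaked an element
}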
