From 465459aff50f5b7810d4fd90a7728ca47a517a32 Mon Sep 17 00:00:00 2001 From: yyforyongyu Date: Wed, 1 Mar 2023 12:53:39 +0800 Subject: [PATCH] cache: replace `elementMap` with `syncMap` --- cache/lru/lru.go | 62 ++++++++++++++++++++-------------------- cache/lru/lru_test.go | 4 +-- cache/lru/sync_map.go | 66 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 99 insertions(+), 33 deletions(-) create mode 100644 cache/lru/sync_map.go diff --git a/cache/lru/lru.go b/cache/lru/lru.go index a79d9e67..c98bc20a 100644 --- a/cache/lru/lru.go +++ b/cache/lru/lru.go @@ -7,9 +7,6 @@ import ( "github.com/lightninglabs/neutrino/cache" ) -// elementMap is an alias for a map from a generic interface to a list.Element. -type elementMap[K comparable, V any] map[K]V - // entry represents a (key,value) pair entry in the Cache. The Cache's list // stores entries which let us get the cache key when an entry is evicted. type entry[K comparable, V cache.Value] struct { @@ -33,7 +30,7 @@ type Cache[K comparable, V cache.Value] struct { // cache is a generic cache which allows us to find an elements position // in the ll list from a given key. - cache elementMap[K, *Element[entry[K, V]]] + cache syncMap[K, *Element[entry[K, V]]] // mtx is used to make sure the Cache is thread-safe. mtx sync.RWMutex @@ -45,7 +42,7 @@ func NewCache[K comparable, V cache.Value](capacity uint64) *Cache[K, V] { return &Cache[K, V]{ capacity: capacity, ll: NewList[entry[K, V]](), - cache: make(map[K]*Element[entry[K, V]]), + cache: syncMap[K, *Element[entry[K, V]]]{}, } } @@ -84,7 +81,7 @@ func (c *Cache[K, V]) evict(needed uint64) (bool, error) { // Remove the element from the cache. c.ll.Remove(elr) - delete(c.cache, ce.key) + c.cache.Delete(ce.key) evicted = true } } @@ -108,17 +105,22 @@ func (c *Cache[K, V]) Put(key K, value V) (bool, error) { "cache with capacity %v", vs, c.capacity) } + // Load the element. + el, ok := c.cache.Load(key) + + // Update the internal list inside a lock. c.mtx.Lock() - defer c.mtx.Unlock() // If the element already exists, remove it and decrease cache's size. - el, ok := c.cache[key] if ok { es, err := el.Value.value.Size() if err != nil { + c.mtx.Unlock() + return false, fmt.Errorf("couldn't determine size of "+ "existing cache value %v", err) } + c.ll.Remove(el) c.size -= es } @@ -132,26 +134,31 @@ func (c *Cache[K, V]) Put(key K, value V) (bool, error) { // We have made enough space in the cache, so just insert it. el = c.ll.PushFront(entry[K, V]{key, value}) - c.cache[key] = el c.size += vs + // Release the lock. + c.mtx.Unlock() + + // Update the cache. + c.cache.Store(key, el) + return evicted, nil } // Get will return value for a given key, making the element the most recently // accessed item in the process. Will return nil if the key isn't found. func (c *Cache[K, V]) Get(key K) (V, error) { - c.mtx.Lock() - defer c.mtx.Unlock() - var defaultVal V - el, ok := c.cache[key] + el, ok := c.cache.Load(key) if !ok { // Element not found in the cache. return defaultVal, cache.ErrElementNotFound } + c.mtx.Lock() + defer c.mtx.Unlock() + // When the cache needs to evict a element to make space for another // one, it starts eviction from the back, so by moving this element to // the front, it's eviction is delayed because it's recently accessed. @@ -175,43 +182,36 @@ func (c *Cache[K, V]) Delete(key K) { // LoadAndDelete queries an item and deletes it from the cache using the // specified key. 
 func (c *Cache[K, V]) LoadAndDelete(key K) (V, bool) {
-	c.mtx.Lock()
-	defer c.mtx.Unlock()
-
 	var defaultVal V
 
 	// Noop if the element doesn't exist.
-	el, ok := c.cache[key]
+	el, ok := c.cache.LoadAndDelete(key)
 	if !ok {
 		return defaultVal, false
 	}
 
-	// Before we delete the element, we need to get its size.
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	// Get its size.
 	vs, err := el.Value.value.Size()
 	if err != nil {
 		return defaultVal, false
 	}
 
-	// Remove the element from the list.
+	// Remove the element from the list and update the cache's size.
 	c.ll.Remove(el)
-
-	// Delete the element from the cache and update the cache's size.
-	delete(c.cache, key)
 	c.size -= vs
 
 	return el.Value.value, true
 }
 
 // Range iterates the cache.
-//
-// NOTE: the `visitor` is wrapped inside the Cache's mutex.
 func (c *Cache[K, V]) Range(visitor func(K, V) bool) {
-	c.mtx.RLock()
-	defer c.mtx.RUnlock()
-
-	for k, v := range c.cache {
-		if !visitor(k, v.Value.value) {
-			return
-		}
+	// valueVisitor is a closure to help unwrap the value from the cache.
+	valueVisitor := func(key K, value *Element[entry[K, V]]) bool {
+		return visitor(key, value.Value.value)
 	}
+
+	c.cache.Range(valueVisitor)
 }
diff --git a/cache/lru/lru_test.go b/cache/lru/lru_test.go
index b4680102..3be389fa 100644
--- a/cache/lru/lru_test.go
+++ b/cache/lru/lru_test.go
@@ -97,7 +97,7 @@ func TestElementSizeCapacityEvictsEverything(t *testing.T) {
 	// Insert element with size=capacity of cache, should evict everything.
 	c.Put(4, &sizeable{value: 4, size: 3})
 	require.Equal(t, c.Len(), 1)
-	require.Equal(t, len(c.cache), 1)
+	require.Equal(t, c.cache.Len(), 1)
 	four := getSizeableValue(c.Get(4))
 	require.Equal(t, four, 4)
 
@@ -110,7 +110,7 @@ func TestElementSizeCapacityEvictsEverything(t *testing.T) {
 	// Insert element with size=capacity of cache.
 	c.Put(4, &sizeable{value: 4, size: 6})
 	require.Equal(t, c.Len(), 1)
-	require.Equal(t, len(c.cache), 1)
+	require.Equal(t, c.cache.Len(), 1)
 	four = getSizeableValue(c.Get(4))
 	require.Equal(t, four, 4)
 }
diff --git a/cache/lru/sync_map.go b/cache/lru/sync_map.go
new file mode 100644
index 00000000..448435b4
--- /dev/null
+++ b/cache/lru/sync_map.go
@@ -0,0 +1,66 @@
+package lru
+
+import "sync"
+
+// syncMap wraps a sync.Map with type parameters such that it's easier to
+// access the items stored in the map since no type assertion is needed. It
+// also requires an explicit type definition when declaring and initializing
+// the variables, which helps us understand what's stored in a given map.
+//
+// NOTE: this is unexported to avoid confusion with `lnd`'s `SyncMap`.
+type syncMap[K comparable, V any] struct {
+	sync.Map
+}
+
+// Store puts an item in the map.
+func (m *syncMap[K, V]) Store(key K, value V) {
+	m.Map.Store(key, value)
+}
+
+// Load queries an item from the map using the specified key. If the item
+// cannot be found, an empty value and false will be returned. If the stored
+// item fails the type assertion, a zero value and false will be returned.
+func (m *syncMap[K, V]) Load(key K) (V, bool) {
+	result, ok := m.Map.Load(key)
+	if !ok {
+		return *new(V), false // nolint: gocritic
+	}
+
+	item, ok := result.(V)
+	return item, ok
+}
+
+// Delete removes an item from the map specified by the key.
+func (m *syncMap[K, V]) Delete(key K) {
+	m.Map.Delete(key)
+}
+
+// LoadAndDelete queries an item and deletes it from the map using the
+// specified key.
+func (m *syncMap[K, V]) LoadAndDelete(key K) (V, bool) { + result, loaded := m.Map.LoadAndDelete(key) + if !loaded { + return *new(V), loaded // nolint: gocritic + } + + item, ok := result.(V) + return item, ok +} + +// Range iterates the map. +func (m *syncMap[K, V]) Range(visitor func(K, V) bool) { + m.Map.Range(func(k any, v any) bool { + return visitor(k.(K), v.(V)) + }) +} + +// Len returns the number of items in the map. +func (m *syncMap[K, V]) Len() int { + var count int + m.Range(func(K, V) bool { + count++ + return true + }) + + return count +}
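
For reference, below is a minimal standalone sketch of the pattern the new sync_map.go introduces: a sync.Map embedded in a generic struct so call sites get typed Store/Load without repeating type assertions. The typedMap name and the main function here are illustrative only and are not part of the patch; they only mirror the shape of the unexported syncMap above.

package main

import (
	"fmt"
	"sync"
)

// typedMap re-creates the patch's wrapper: sync.Map embedded in a generic
// struct so the key and value types are fixed at the declaration site.
type typedMap[K comparable, V any] struct {
	sync.Map
}

// Store puts an item in the map with a typed key and value.
func (m *typedMap[K, V]) Store(key K, value V) {
	m.Map.Store(key, value)
}

// Load returns a typed value, hiding the `any` type assertion from callers.
func (m *typedMap[K, V]) Load(key K) (V, bool) {
	result, ok := m.Map.Load(key)
	if !ok {
		var zero V
		return zero, false
	}

	value, ok := result.(V)
	return value, ok
}

func main() {
	// With a bare sync.Map the caller would need result.(string) at every
	// read; with the wrapper the types are declared once.
	var m typedMap[int, string]

	m.Store(1, "one")

	if v, ok := m.Load(1); ok {
		fmt.Println(v) // Prints "one".
	}
}

The patch keeps its own wrapper unexported on purpose, per the NOTE in sync_map.go, to avoid confusion with lnd's SyncMap.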
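And a rough usage sketch of the patched cache under concurrent access, the scenario this change targets: key lookups now go through the lock-free syncMap index, while the eviction list and size accounting stay behind the cache's mutex. The item type is hypothetical, the import path github.com/lightninglabs/neutrino/cache/lru is assumed from the file layout, and the assumption that cache.Value only requires Size() (uint64, error) is inferred from the diff rather than confirmed against the full repository.

package main

import (
	"fmt"
	"sync"

	"github.com/lightninglabs/neutrino/cache/lru"
)

// item is a hypothetical value type for this sketch. It satisfies the
// cache.Value constraint by reporting a fixed size of 1, so the cache
// capacity behaves like a plain entry count.
type item struct {
	val int
}

// Size reports the cache size consumed by this item.
func (i *item) Size() (uint64, error) {
	return 1, nil
}

func main() {
	// A cache that holds at most 100 units worth of entries.
	c := lru.NewCache[int, *item](100)

	var wg sync.WaitGroup

	// Several goroutines write and read at once; Put and Get are the
	// paths reworked by this patch.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(base int) {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				key := base*1000 + j
				if _, err := c.Put(key, &item{val: key}); err != nil {
					fmt.Println("put failed:", err)
				}

				// A hit promotes the entry to most recently used;
				// a miss just means it was already evicted.
				if v, err := c.Get(key); err == nil {
					_ = v.val
				}
			}
		}(i)
	}

	wg.Wait()
	fmt.Println("entries cached:", c.Len())
}

Only the key-to-element index moved to the syncMap; eviction order is still decided by the mutex-guarded list, which is why Put and Get still take c.mtx around the list operations.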