update go-cache to v3.2.0 with support for ignoring errors (#1273)
parent eb08529f35
commit da751c02fb

go.mod (2 changes)
@@ -5,7 +5,7 @@ go 1.19
 require (
 	codeberg.org/gruf/go-bytesize v1.0.0
 	codeberg.org/gruf/go-byteutil v1.0.2
-	codeberg.org/gruf/go-cache/v3 v3.1.8
+	codeberg.org/gruf/go-cache/v3 v3.2.0
 	codeberg.org/gruf/go-debug v1.2.0
 	codeberg.org/gruf/go-errors/v2 v2.0.2
 	codeberg.org/gruf/go-kv v1.5.2
go.sum (4 changes)
@@ -69,8 +69,8 @@ codeberg.org/gruf/go-bytesize v1.0.0/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacp
 codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
 codeberg.org/gruf/go-byteutil v1.0.2 h1:OesVyK5VKWeWdeDR00zRJ+Oy8hjXx1pBhn7WVvcZWVE=
 codeberg.org/gruf/go-byteutil v1.0.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
-codeberg.org/gruf/go-cache/v3 v3.1.8 h1:wbUef/QtRstEb7sSpQYHT5CtSFtKkeZr4ZhOTXqOpac=
-codeberg.org/gruf/go-cache/v3 v3.1.8/go.mod h1:h6im2UVGdrGtNt4IVKARVeoW4kAdok5ts7CbH15UWXs=
+codeberg.org/gruf/go-cache/v3 v3.2.0 h1:pHJhS3SqufVnA2bxgzQpBh9Mfsljqulx2ynpy6thTE8=
+codeberg.org/gruf/go-cache/v3 v3.2.0/go.mod h1:d4xafgOjVE+4+82WjIqqJl8NQusXkgUHbkTuXoeB3fA=
 codeberg.org/gruf/go-debug v1.2.0 h1:WBbTMnK1ArFKUmgv04aO2JiC/daTOB8zQGi521qb7OU=
 codeberg.org/gruf/go-debug v1.2.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
 codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
@@ -185,7 +185,7 @@ func (c *gtsCaches) User() *result.Cache[*gtsmodel.User] {
 }
 
 func (c *gtsCaches) initAccount() {
-	c.account = result.NewSized([]result.Lookup{
+	c.account = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "URI"},
 		{Name: "URL"},
@@ -200,7 +200,7 @@ func (c *gtsCaches) initAccount() {
 }
 
 func (c *gtsCaches) initBlock() {
-	c.block = result.NewSized([]result.Lookup{
+	c.block = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "AccountID.TargetAccountID"},
 		{Name: "URI"},
@@ -220,7 +220,7 @@ func (c *gtsCaches) initDomainBlock() {
 }
 
 func (c *gtsCaches) initEmoji() {
-	c.emoji = result.NewSized([]result.Lookup{
+	c.emoji = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "URI"},
 		{Name: "Shortcode.Domain"},
@@ -234,7 +234,7 @@ func (c *gtsCaches) initEmoji() {
 }
 
 func (c *gtsCaches) initEmojiCategory() {
-	c.emojiCategory = result.NewSized([]result.Lookup{
+	c.emojiCategory = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "Name"},
 	}, func(c1 *gtsmodel.EmojiCategory) *gtsmodel.EmojiCategory {
@@ -246,7 +246,7 @@ func (c *gtsCaches) initEmojiCategory() {
 }
 
 func (c *gtsCaches) initMention() {
-	c.mention = result.NewSized([]result.Lookup{
+	c.mention = result.New([]result.Lookup{
 		{Name: "ID"},
 	}, func(m1 *gtsmodel.Mention) *gtsmodel.Mention {
 		m2 := new(gtsmodel.Mention)
@@ -257,7 +257,7 @@ func (c *gtsCaches) initMention() {
 }
 
 func (c *gtsCaches) initNotification() {
-	c.notification = result.NewSized([]result.Lookup{
+	c.notification = result.New([]result.Lookup{
 		{Name: "ID"},
 	}, func(n1 *gtsmodel.Notification) *gtsmodel.Notification {
 		n2 := new(gtsmodel.Notification)
@@ -268,7 +268,7 @@ func (c *gtsCaches) initNotification() {
 }
 
 func (c *gtsCaches) initStatus() {
-	c.status = result.NewSized([]result.Lookup{
+	c.status = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "URI"},
 		{Name: "URL"},
@@ -282,7 +282,7 @@ func (c *gtsCaches) initStatus() {
 
 // initTombstone will initialize the gtsmodel.Tombstone cache.
 func (c *gtsCaches) initTombstone() {
-	c.tombstone = result.NewSized([]result.Lookup{
+	c.tombstone = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "URI"},
 	}, func(t1 *gtsmodel.Tombstone) *gtsmodel.Tombstone {
@@ -294,7 +294,7 @@ func (c *gtsCaches) initTombstone() {
 }
 
 func (c *gtsCaches) initUser() {
-	c.user = result.NewSized([]result.Lookup{
+	c.user = result.New([]result.Lookup{
 		{Name: "ID"},
 		{Name: "AccountID"},
 		{Name: "Email"},
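The hunks above only show the constructor call changing from result.NewSized to result.New; in v3.2.0 the capacity argument moves into New itself (see the library diff further down). A minimal sketch of the new call shape — the model type, field values and capacity below are hypothetical illustrations, not code from this commit:

// Sketch only: stand-in model type and arbitrary capacity.
package main

import (
	"fmt"

	"codeberg.org/gruf/go-cache/v3/result"
)

// account is a stand-in model type for this example.
type account struct {
	ID  string
	URI string
}

func main() {
	// v3.2.0 folds NewSized into New: the capacity is now the final
	// argument to result.New (100 is an arbitrary example value).
	cache := result.New([]result.Lookup{
		{Name: "ID"},
		{Name: "URI"},
	}, func(a1 *account) *account {
		a2 := new(account)
		*a2 = *a1 // shallow copy is enough for this flat struct
		return a2
	}, 100)

	// Load by the "ID" lookup; the result is cached under its generated keys.
	acc, err := cache.Load("ID", func() (*account, error) {
		return &account{ID: "01EXAMPLE", URI: "https://example.org/users/example"}, nil
	}, "01EXAMPLE")
	fmt.Println(acc, err)
}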
@@ -1,10 +1,12 @@
 package result
 
 import (
+	"context"
 	"reflect"
 	"time"
 
 	"codeberg.org/gruf/go-cache/v3/ttl"
+	"codeberg.org/gruf/go-errors/v2"
 )
 
 // Lookup represents a struct object lookup method in the cache.
@@ -16,6 +18,9 @@ type Lookup struct {
 	// AllowZero indicates whether to accept and cache
 	// under zero value keys, otherwise ignore them.
 	AllowZero bool
+
+	// TODO: support toggling case sensitive lookups.
+	// CaseSensitive bool
 }
 
 // Cache provides a means of caching value structures, along with
@@ -24,17 +29,13 @@ type Lookup struct {
 type Cache[Value any] struct {
 	cache   ttl.Cache[int64, result[Value]] // underlying result cache
 	lookups structKeys                      // pre-determined struct lookups
+	ignore  func(error) bool                // determines cacheable errors
 	copy    func(Value) Value               // copies a Value type
 	next    int64                           // update key counter
 }
 
-// New returns a new initialized Cache, with given lookups and underlying value copy function.
-func New[Value any](lookups []Lookup, copy func(Value) Value) *Cache[Value] {
-	return NewSized(lookups, copy, 64)
-}
-
-// NewSized returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
-func NewSized[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
+// New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
+func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
 	var z Value
 
 	// Determine generic type
@@ -63,6 +64,7 @@ func NewSized[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cac
 	c.cache.Init(0, cap, 0)
 	c.SetEvictionCallback(nil)
 	c.SetInvalidateCallback(nil)
+	c.IgnoreErrors(nil)
 	return c
 }
 
@@ -93,8 +95,8 @@ func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
 	c.cache.SetEvictionCallback(func(item *ttl.Entry[int64, result[Value]]) {
 		for _, key := range item.Value.Keys {
 			// Delete key->pkey lookup
-			pkeys := key.key.pkeys
-			delete(pkeys, key.value)
+			pkeys := key.info.pkeys
+			delete(pkeys, key.key)
 		}
 
 		if item.Value.Error != nil {
@@ -116,8 +118,8 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
 	c.cache.SetInvalidateCallback(func(item *ttl.Entry[int64, result[Value]]) {
 		for _, key := range item.Value.Keys {
 			// Delete key->pkey lookup
-			pkeys := key.key.pkeys
-			delete(pkeys, key.value)
+			pkeys := key.info.pkeys
+			delete(pkeys, key.key)
 		}
 
 		if item.Value.Error != nil {
@@ -130,7 +132,23 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
 	})
 }
 
-// Load will attempt to load an existing result from the cacche for the given lookup and key parts, else calling the load function and caching that result.
+// IgnoreErrors allows setting a function hook to determine which error types should / not be cached.
+func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
+	if ignore == nil {
+		ignore = func(err error) bool {
+			return errors.Is(
+				err,
+				context.Canceled,
+				context.DeadlineExceeded,
+			)
+		}
+	}
+	c.cache.Lock()
+	c.ignore = ignore
+	c.cache.Unlock()
+}
+
+// Load will attempt to load an existing result from the cacche for the given lookup and key parts, else calling the provided load function and caching the result.
 func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
 	var (
 		zero Value
@@ -146,7 +164,7 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 	// Acquire cache lock
 	c.cache.Lock()
 
-	// Look for primary key for cache key
+	// Look for primary cache key
 	pkey, ok := keyInfo.pkeys[ckey]
 
 	if ok {
@@ -159,17 +177,28 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 	c.cache.Unlock()
 
 	if !ok {
-		// Generate new result from fresh load.
-		res.Value, res.Error = load()
+		// Generate fresh result.
+		value, err := load()
 
-		if res.Error != nil {
+		if err != nil {
+			if c.ignore(err) {
+				// don't cache this error type
+				return zero, err
+			}
+
+			// Store error result.
+			res.Error = err
+
 			// This load returned an error, only
 			// store this item under provided key.
-			res.Keys = []cachedKey{{
-				key:   keyInfo,
-				value: ckey,
+			res.Keys = []cacheKey{{
+				info: keyInfo,
+				key:  ckey,
 			}}
 		} else {
+			// Store value result.
+			res.Value = value
+
 			// This was a successful load, generate keys.
 			res.Keys = c.lookups.generate(res.Value)
 		}
@@ -178,8 +207,8 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 		c.cache.Lock()
 		defer c.cache.Unlock()
 
-		// Cache this result
-		c.storeResult(res)
+		// Cache result
+		c.store(res)
 	}
 
 	// Catch and return error
@@ -209,8 +238,8 @@ func (c *Cache[Value]) Store(value Value, store func() error) error {
 	c.cache.Lock()
 	defer c.cache.Unlock()
 
-	// Cache this result
-	c.storeResult(result)
+	// Cache result
+	c.store(result)
 
 	return nil
 }
@@ -270,22 +299,13 @@ func (c *Cache[Value]) Clear() {
 	c.cache.Clear()
 }
 
-// Len returns the current length of the cache.
-func (c *Cache[Value]) Len() int {
-	return c.cache.Cache.Len()
-}
-
-// Cap returns the maximum capacity of this result cache.
-func (c *Cache[Value]) Cap() int {
-	return c.cache.Cache.Cap()
-}
-
-func (c *Cache[Value]) storeResult(res result[Value]) {
+// store will cache this result under all of its required cache keys.
+func (c *Cache[Value]) store(res result[Value]) {
 	for _, key := range res.Keys {
-		pkeys := key.key.pkeys
+		pkeys := key.info.pkeys
 
 		// Look for cache primary key
-		pkey, ok := pkeys[key.value]
+		pkey, ok := pkeys[key.key]
 
 		if ok {
 			// Get the overlapping result with this key.
@@ -293,11 +313,11 @@ func (c *Cache[Value]) storeResult(res result[Value]) {
 
 			// From conflicting entry, drop this key, this
 			// will prevent eviction cleanup key confusion.
-			entry.Value.Keys.drop(key.key.name)
+			entry.Value.Keys.drop(key.info.name)
 
 			if len(entry.Value.Keys) == 0 {
 				// We just over-wrote the only lookup key for
-				// this value, so we drop its primary key too
+				// this value, so we drop its primary key too.
 				c.cache.Cache.Delete(pkey)
 			}
 		}
@@ -306,11 +326,14 @@ func (c *Cache[Value]) storeResult(res result[Value]) {
 	// Get primary key
 	pkey := c.next
 	c.next++
+	if pkey > c.next {
+		panic("cache primary key overflow")
+	}
 
 	// Store all primary key lookups
 	for _, key := range res.Keys {
-		pkeys := key.key.pkeys
-		pkeys[key.value] = pkey
+		pkeys := key.info.pkeys
+		pkeys[key.key] = pkey
 	}
 
 	// Store main entry under primary key, using evict hook if needed
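The new IgnoreErrors hook, together with the c.ignore(err) check added to Load above, means that errors matching the hook are returned to the caller but never cached; the default hook installed by IgnoreErrors(nil) covers context.Canceled and context.DeadlineExceeded. A hedged usage sketch — the model type, error value and capacity are hypothetical, not code from this commit:

// Sketch only: demonstrates IgnoreErrors plus error-result caching in Load.
package main

import (
	"context"
	"errors"
	"fmt"

	"codeberg.org/gruf/go-cache/v3/result"
)

// user is a stand-in model type; errNotFound is a hypothetical domain error.
type user struct{ ID string }

var errNotFound = errors.New("not found")

func main() {
	cache := result.New([]result.Lookup{
		{Name: "ID"},
	}, func(u1 *user) *user {
		u2 := new(user)
		*u2 = *u1
		return u2
	}, 100)

	// Optional: customize which errors are never cached. This mirrors the
	// default hook installed by IgnoreErrors(nil) in the diff above.
	cache.IgnoreErrors(func(err error) bool {
		return errors.Is(err, context.Canceled) ||
			errors.Is(err, context.DeadlineExceeded)
	})

	// First load fails with a cacheable (non-ignored) error...
	_, err := cache.Load("ID", func() (*user, error) {
		return nil, errNotFound
	}, "u1")
	fmt.Println("first load:", err)

	// ...so a second load of the same key should return the cached error
	// without calling the load function again.
	called := false
	_, err = cache.Load("ID", func() (*user, error) {
		called = true
		return nil, errNotFound
	}, "u1")
	fmt.Println("second load:", err, "| load invoked again:", called)
}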
@@ -27,8 +27,8 @@ func (sk structKeys) get(name string) *structKey {
 
 // generate will calculate and produce a slice of cache keys the given value
 // can be stored under in the, as determined by receiving struct keys.
-func (sk structKeys) generate(a any) []cachedKey {
-	var keys []cachedKey
+func (sk structKeys) generate(a any) []cacheKey {
+	var keys []cacheKey
 
 	// Get reflected value in order
 	// to access the struct fields
@@ -43,8 +43,8 @@ func (sk structKeys) generate(a any) []cachedKey {
 	}
 
 	// Acquire byte buffer
-	buf := bufpool.Get().(*byteutil.Buffer)
-	defer bufpool.Put(buf)
+	buf := getBuf()
+	defer putBuf(buf)
 
 	for i := range sk {
 		// Reset buffer
@@ -68,39 +68,39 @@ func (sk structKeys) generate(a any) []cachedKey {
 		}
 
 		// Append new cached key to slice
-		keys = append(keys, cachedKey{
-			key:   &sk[i],
-			value: string(buf.B), // copy
+		keys = append(keys, cacheKey{
+			info: &sk[i],
+			key:  string(buf.B), // copy
 		})
 	}
 
 	return keys
 }
 
-type cacheKeys []cachedKey
+type cacheKeys []cacheKey
 
 // drop will drop the cachedKey with lookup name from receiving cacheKeys slice.
 func (ck *cacheKeys) drop(name string) {
 	_ = *ck // move out of loop
 	for i := range *ck {
-		if (*ck)[i].key.name == name {
+		if (*ck)[i].info.name == name {
 			(*ck) = append((*ck)[:i], (*ck)[i+1:]...)
 			break
 		}
 	}
}
 
-// cachedKey represents an actual cached key.
-type cachedKey struct {
-	// key is a reference to the structKey this
+// cacheKey represents an actual cached key.
+type cacheKey struct {
+	// info is a reference to the structKey this
 	// cacheKey is representing. This is a shared
 	// reference and as such only the structKey.pkeys
 	// lookup map is expecting to be modified.
-	key *structKey
+	info *structKey
 
 	// value is the actual string representing
 	// this cache key for hashmap lookups.
-	value string
+	key string
 }
 
 // structKey represents a list of struct fields
@@ -196,9 +196,9 @@ func genKey(parts ...any) string {
 		panic("no key parts provided")
 	}
 
-	// Acquire buffer and reset
-	buf := bufpool.Get().(*byteutil.Buffer)
-	defer bufpool.Put(buf)
+	// Acquire byte buffer
+	buf := getBuf()
+	defer putBuf(buf)
 	buf.Reset()
 
 	// Encode each key part
@@ -222,8 +222,19 @@ func isExported(fnName string) bool {
 
 // bufpool provides a memory pool of byte
 // buffers use when encoding key types.
-var bufpool = sync.Pool{
+var bufPool = sync.Pool{
 	New: func() any {
 		return &byteutil.Buffer{B: make([]byte, 0, 512)}
 	},
 }
+
+func getBuf() *byteutil.Buffer {
+	return bufPool.Get().(*byteutil.Buffer)
+}
+
+func putBuf(buf *byteutil.Buffer) {
+	if buf.Cap() > int(^uint16(0)) {
+		return // drop large bufs
+	}
+	bufPool.Put(buf)
+}
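The getBuf/putBuf helpers added above wrap a common pooling pattern: reuse small byte buffers through sync.Pool, but refuse to return unusually large ones so their memory can be reclaimed. A generic, standalone illustration of that pattern using only the standard library (bytes.Buffer stands in for byteutil.Buffer; the 64 KiB cap mirrors the ^uint16(0) limit in the diff):

// Generic illustration, not code from this commit.
package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{
	New: func() any { return bytes.NewBuffer(make([]byte, 0, 512)) },
}

func get() *bytes.Buffer { return pool.Get().(*bytes.Buffer) }

func put(buf *bytes.Buffer) {
	if buf.Cap() > int(^uint16(0)) {
		return // drop oversized buffers instead of pinning their memory
	}
	buf.Reset()
	pool.Put(buf)
}

func main() {
	buf := get()
	buf.WriteString("example key material")
	fmt.Println(buf.String())
	put(buf)
}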
@@ -13,7 +13,7 @@ codeberg.org/gruf/go-bytesize
 # codeberg.org/gruf/go-byteutil v1.0.2
 ## explicit; go 1.16
 codeberg.org/gruf/go-byteutil
-# codeberg.org/gruf/go-cache/v3 v3.1.8
+# codeberg.org/gruf/go-cache/v3 v3.2.0
 ## explicit; go 1.19
 codeberg.org/gruf/go-cache/v3
 codeberg.org/gruf/go-cache/v3/result