[feature] simpler cache size configuration (#2051)
* add automatic cache max size generation based on ratios of a singular fixed memory target
* remove now-unused cache max-size config variables
* slight ratio tweak
* remove unused visibility config var
* add secret little ratio config trick
* fixed a word
* update cache library to remove use of TTL in result caches + slice cache
* update other cache usages to use correct interface
* update example config to explain the cache memory target
* update env parsing test with new config values
* do some ratio twiddling
* add missing header
* update envparsing with latest defaults
* update size calculations to take into account result cache, simple cache and extra map overheads
* tweak the ratios some more
* more nan rampaging
* fix envparsing script
* update cache library, add sweep function to keep caches trim
* sweep caches once a minute
* add a regular job to sweep caches and keep under 80% utilisation
* remove dead code
* add new size library used to libraries section of readme
* add better explanations for the mem-ratio numbers
* update go-cache
* library version bump
* update cache.result{} size model estimation

---------
Signed-off-by: kim <grufwub@gmail.com>
parent e8a20f587c
commit 00adf18c24
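In short, the new scheme gives every cache a mem-ratio weight, splits the configured memory target across those weights, and divides each cache's share by an estimated per-entry size to get its max entry count; a negative ratio bypasses the calculation and sets the capacity directly. The sketch below only illustrates that arithmetic — memoryTarget and capacityFor are made-up names for the example, not GoToSocial helpers:

package main

import "fmt"

// Illustrative only: this mirrors the idea of cache.memory-target and the
// per-cache mem-ratio values, not the real calculateCacheMax helper.
const memoryTarget = 200 * 1024 * 1024 // "200MiB" target shared by all caches

// capacityFor turns one cache's ratio share of the memory target into a max
// entry count, given an estimated per-entry size (key + value + overheads).
// A negative ratio acts as a manual capacity override, i.e. the "secret
// little ratio config trick" mentioned in the commit message.
func capacityFor(entrySize uintptr, ratio, sumOfAllRatios float64) int {
    if ratio < 0 {
        return int(-ratio)
    }
    budget := float64(memoryTarget) * (ratio / sumOfAllRatios)
    return int(budget / float64(entrySize))
}

func main() {
    // A cache of ~2KiB entries with ratio 5 out of a total of 100:
    // (200MiB * 0.05) / 2KiB ≈ 5120 entries.
    fmt.Println(capacityFor(2048, 5, 100))
    // Negative ratio: capacity forced to exactly 500 entries.
    fmt.Println(capacityFor(2048, -500, 100))
}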
@@ -224,6 +224,7 @@ The following open source libraries, frameworks, and tools are used by GoToSocial
 - [buckket/go-blurhash](https://github.com/buckket/go-blurhash); used for generating image blurhashes. [GPL-3.0 License](https://spdx.org/licenses/GPL-3.0-only.html).
 - [coreos/go-oidc](https://github.com/coreos/go-oidc); OIDC client library. [Apache-2.0 License](https://spdx.org/licenses/Apache-2.0.html).
 - [disintegration/imaging](https://github.com/disintegration/imaging); image resizing. [MIT License](https://spdx.org/licenses/MIT.html).
+- [DmitriyVTitov/size](https://github.com/DmitriyVTitov/size); runtime model memory size calculations. [MIT License](https://spdx.org/licenses/MIT.html).
 - Gin:
   - [gin-contrib/cors](https://github.com/gin-contrib/cors); Gin CORS middleware. [MIT License](https://spdx.org/licenses/MIT.html).
   - [gin-contrib/gzip](https://github.com/gin-contrib/gzip); Gin gzip middleware. [MIT License](https://spdx.org/licenses/MIT.html).
@@ -25,7 +25,9 @@ import (
     "os"
     "os/signal"
     "syscall"
+    "time"
 
+    "codeberg.org/gruf/go-sched"
     "github.com/gin-gonic/gin"
     "github.com/superseriousbusiness/gotosocial/cmd/gotosocial/action"
     "github.com/superseriousbusiness/gotosocial/internal/api"
@@ -117,6 +119,13 @@ var Start action.GTSAction = func(ctx context.Context) error {
     state.Workers.Start()
     defer state.Workers.Stop()
 
+    // Add a task to the scheduler to sweep caches.
+    // Frequency = 1 * minute
+    // Threshold = 80% capacity
+    sweep := func(time.Time) { state.Caches.Sweep(80) }
+    job := sched.NewJob(sweep).Every(time.Minute)
+    _ = state.Workers.Scheduler.Schedule(job)
+
     // Build handlers used in later initializations.
     mediaManager := media.NewManager(&state)
     oauthServer := oauth.New(ctx, dbService)
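For readers unfamiliar with codeberg.org/gruf/go-sched, the scheduled job above is roughly equivalent to the stdlib-only loop below. This is an illustrative sketch, not code from this commit; sweeper and runSweepLoop are invented names for the example:

package main

import (
    "fmt"
    "time"
)

// sweeper stands in for anything with a threshold-based Sweep method,
// like the state.Caches.Sweep(80) call scheduled above.
type sweeper interface {
    Sweep(threshold float64)
}

type printSweeper struct{}

func (printSweeper) Sweep(threshold float64) {
    fmt.Printf("sweeping caches down to %.0f%% utilisation\n", threshold)
}

// runSweepLoop trims caches to 80% capacity at a fixed interval until
// stop is closed, mimicking the scheduled sweep job.
func runSweepLoop(s sweeper, interval time.Duration, stop <-chan struct{}) {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            s.Sweep(80)
        case <-stop:
            return
        }
    }
}

func main() {
    stop := make(chan struct{})
    go runSweepLoop(printSweeper{}, time.Second, stop)
    time.Sleep(3 * time.Second)
    close(stop)
}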
@@ -231,111 +231,13 @@ db-sqlite-cache-size: "8MiB"
 db-sqlite-busy-timeout: "30m"
 
 cache:
-  # Cache configuration options:
-  #
-  # max-size = maximum cached objects count
-  # ttl = cached object lifetime
-  # sweep-freq = frequency to look for stale cache objects
-  #              (zero will disable cache sweeping)
-
-  #############################
-  #### VISIBILITY CACHES ######
-  #############################
-  #
-  # Configure Status and account
-  # visibility cache.
-
-  visibility-max-size: 2000
-  visibility-ttl: "30m"
-  visibility-sweep-freq: "1m"
-
-  gts:
-    ###########################
-    #### DATABASE CACHES ######
-    ###########################
-    #
-    # Configure GTS database
-    # model caches.
-
-    account-max-size: 2000
-    account-ttl: "30m"
-    account-sweep-freq: "1m"
-
-    block-max-size: 1000
-    block-ttl: "30m"
-    block-sweep-freq: "1m"
-
-    domain-block-max-size: 2000
-    domain-block-ttl: "24h"
-    domain-block-sweep-freq: "1m"
-
-    emoji-max-size: 2000
-    emoji-ttl: "30m"
-    emoji-sweep-freq: "1m"
-
-    emoji-category-max-size: 100
-    emoji-category-ttl: "30m"
-    emoji-category-sweep-freq: "1m"
-
-    follow-max-size: 2000
-    follow-ttl: "30m"
-    follow-sweep-freq: "1m"
-
-    follow-request-max-size: 2000
-    follow-request-ttl: "30m"
-    follow-request-sweep-freq: "1m"
-
-    instance-max-size: 2000
-    instance-ttl: "30m"
-    instance-sweep-freq: "1m"
-
-    list-max-size: 2000
-    list-ttl: "30m"
-    list-sweep-freq: "1m"
-
-    list-entry-max-size: 2000
-    list-entry-ttl: "30m"
-    list-entry-sweep-freq: "1m"
-
-    media-max-size: 1000
-    media-ttl: "30m"
-    media-sweep-freq: "1m"
-
-    mention-max-size: 2000
-    mention-ttl: "30m"
-    mention-sweep-freq: "1m"
-
-    notification-max-size: 1000
-    notification-ttl: "30m"
-    notification-sweep-freq: "1m"
-
-    report-max-size: 100
-    report-ttl: "30m"
-    report-sweep-freq: "1m"
-
-    status-max-size: 2000
-    status-ttl: "30m"
-    status-sweep-freq: "1m"
-
-    status-fave-max-size: 2000
-    status-fave-ttl: "30m"
-    status-fave-sweep-freq: "1m"
-
-    tag-max-size: 2000
-    tag-ttl: "30m"
-    tag-sweep-freq: "1m"
-
-    tombstone-max-size: 500
-    tombstone-ttl: "30m"
-    tombstone-sweep-freq: "1m"
-
-    user-max-size: 500
-    user-ttl: "30m"
-    user-sweep-freq: "1m"
-
-    webfinger-max-size: 250
-    webfinger-ttl: "24h"
-    webfinger-sweep-freq: "15m"
-
+  # cache.memory-target sets a target limit that
+  # the application will try to keep it's caches
+  # within. This is based on estimated sizes of
+  # in-memory objects, and so NOT AT ALL EXACT.
+  # Examples: ["100MiB", "200MiB", "500MiB", "1GiB"]
+  # Default: "200MiB"
+  memory-target: "200MiB"
 
 ######################
 ##### WEB CONFIG #####
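The estimates that memory-target is divided against come from measuring populated example models with github.com/DmitriyVTitov/size, the same way the sizeof*() helpers further down in this diff do. A minimal standalone illustration of that measurement (the note struct here is hypothetical, not a gtsmodel type):

package main

import (
    "fmt"

    "github.com/DmitriyVTitov/size"
)

// note is an example model; field values matter because size.Of walks the
// actual object, including the strings it points at.
type note struct {
    ID, AccountID, TargetAccountID string
    Comment                        string
}

func main() {
    // Estimate the in-memory footprint of one populated model.
    n := note{
        ID:              "01H000000000000000000000",
        AccountID:       "01H000000000000000000001",
        TargetAccountID: "01H000000000000000000002",
        Comment:         "an example comment of typical length",
    }
    fmt.Printf("estimated size: %d bytes\n", size.Of(&n))
}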
go.mod
@@ -5,7 +5,7 @@ go 1.20
 require (
     codeberg.org/gruf/go-bytesize v1.0.2
     codeberg.org/gruf/go-byteutil v1.1.2
-    codeberg.org/gruf/go-cache/v3 v3.4.4
+    codeberg.org/gruf/go-cache/v3 v3.5.3
     codeberg.org/gruf/go-debug v1.3.0
     codeberg.org/gruf/go-errors/v2 v2.2.0
     codeberg.org/gruf/go-fastcopy v1.1.2
@@ -16,6 +16,7 @@ require (
     codeberg.org/gruf/go-runners v1.6.1
     codeberg.org/gruf/go-sched v1.2.3
     codeberg.org/gruf/go-store/v2 v2.2.2
+    github.com/DmitriyVTitov/size v1.5.0
     github.com/KimMachineGun/automemlimit v0.2.6
     github.com/abema/go-mp4 v0.11.0
     github.com/buckket/go-blurhash v1.1.0
go.sum
@@ -48,8 +48,8 @@ codeberg.org/gruf/go-bytesize v1.0.2/go.mod h1:n/GU8HzL9f3UNp/mUKyr1qVmTlj7+xacp
 codeberg.org/gruf/go-byteutil v1.0.0/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
 codeberg.org/gruf/go-byteutil v1.1.2 h1:TQLZtTxTNca9xEfDIndmo7nBYxeS94nrv/9DS3Nk5Tw=
 codeberg.org/gruf/go-byteutil v1.1.2/go.mod h1:cWM3tgMCroSzqoBXUXMhvxTxYJp+TbCr6ioISRY5vSU=
-codeberg.org/gruf/go-cache/v3 v3.4.4 h1:V0A3EzjhzhULOydD16pwa2DRDwF67OuuP4ORnm//7p8=
-codeberg.org/gruf/go-cache/v3 v3.4.4/go.mod h1:pTeVPEb9DshXUkd8Dg76UcsLpU6EC/tXQ2qb+JrmxEc=
+codeberg.org/gruf/go-cache/v3 v3.5.3 h1:CRO2syVQxT/JbqDnUxzjeJkLInihEmTlJOkrOgkTmqI=
+codeberg.org/gruf/go-cache/v3 v3.5.3/go.mod h1:NbsGQUgEdNFd631WSasvCHIVAaY9ovuiSeoBwtsIeDc=
 codeberg.org/gruf/go-debug v1.3.0 h1:PIRxQiWUFKtGOGZFdZ3Y0pqyfI0Xr87j224IYe2snZs=
 codeberg.org/gruf/go-debug v1.3.0/go.mod h1:N+vSy9uJBQgpQcJUqjctvqFz7tBHJf+S/PIjLILzpLg=
 codeberg.org/gruf/go-errors/v2 v2.0.0/go.mod h1:ZRhbdhvgoUA3Yw6e56kd9Ox984RrvbEFC2pOXyHDJP4=
@@ -87,6 +87,8 @@ codeberg.org/gruf/go-store/v2 v2.2.2/go.mod h1:QRM3LUAfYyoGMWLTqA1WzohxQgYqPFiVv
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DmitriyVTitov/size v1.5.0 h1:/PzqxYrOyOUX1BXj6J9OuVRVGe+66VL4D9FlUaW515g=
+github.com/DmitriyVTitov/size v1.5.0/go.mod h1:le6rNI4CoLQV1b9gzp1+3d7hMAD/uu2QcJ+aYbNgiU0=
 github.com/KimMachineGun/automemlimit v0.2.6 h1:tQFriVTcIteUkV5EgU9iz03eDY36T8JU5RAjP2r6Kt0=
 github.com/KimMachineGun/automemlimit v0.2.6/go.mod h1:pJhTW/nWJMj6SnWSU2TEKSlCaM+1N5Mej+IfS/5/Ol0=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
@@ -256,6 +258,8 @@ github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -204,3 +204,36 @@ func (c *Caches) setuphooks() {
         c.Visibility.Invalidate("RequesterID", user.AccountID)
     })
 }
+
+// Sweep will sweep all the available caches to ensure none
+// are above threshold percent full to their total capacity.
+//
+// This helps with cache performance, as a full cache will
+// require an eviction on every single write, which adds
+// significant overhead to all cache writes.
+func (c *Caches) Sweep(threshold float64) {
+    c.GTS.Account().Trim(threshold)
+    c.GTS.AccountNote().Trim(threshold)
+    c.GTS.Block().Trim(threshold)
+    c.GTS.BlockIDs().Trim(threshold)
+    c.GTS.Emoji().Trim(threshold)
+    c.GTS.EmojiCategory().Trim(threshold)
+    c.GTS.Follow().Trim(threshold)
+    c.GTS.FollowIDs().Trim(threshold)
+    c.GTS.FollowRequest().Trim(threshold)
+    c.GTS.FollowRequestIDs().Trim(threshold)
+    c.GTS.Instance().Trim(threshold)
+    c.GTS.List().Trim(threshold)
+    c.GTS.ListEntry().Trim(threshold)
+    c.GTS.Marker().Trim(threshold)
+    c.GTS.Media().Trim(threshold)
+    c.GTS.Mention().Trim(threshold)
+    c.GTS.Notification().Trim(threshold)
+    c.GTS.Report().Trim(threshold)
+    c.GTS.Status().Trim(threshold)
+    c.GTS.StatusFave().Trim(threshold)
+    c.GTS.Tag().Trim(threshold)
+    c.GTS.Tombstone().Trim(threshold)
+    c.GTS.User().Trim(threshold)
+    c.Visibility.Trim(threshold)
+}
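Why sweeping matters: a bounded cache sitting at 100% capacity has to evict on every single write. The toy cache below (an illustrative stand-in, not the codeberg.org/gruf/go-cache implementation) shows the shape of a Trim that drops utilisation back under a threshold so subsequent writes stay cheap:

package main

import "fmt"

// toyCache is a simplified bounded cache with insertion-order eviction.
type toyCache struct {
    max   int
    order []string // insertion order, oldest first
    items map[string]string
}

func newToyCache(max int) *toyCache {
    return &toyCache{max: max, items: make(map[string]string)}
}

func (c *toyCache) Set(k, v string) {
    if _, ok := c.items[k]; !ok {
        if len(c.order) >= c.max { // full: every new insert forces an eviction
            c.evictOldest()
        }
        c.order = append(c.order, k)
    }
    c.items[k] = v
}

func (c *toyCache) evictOldest() {
    oldest := c.order[0]
    c.order = c.order[1:]
    delete(c.items, oldest)
}

// Trim evicts oldest entries until utilisation is at or below threshold%.
func (c *toyCache) Trim(threshold float64) {
    target := int(float64(c.max) * threshold / 100)
    for len(c.order) > target {
        c.evictOldest()
    }
}

func main() {
    c := newToyCache(10)
    for i := 0; i < 10; i++ {
        c.Set(fmt.Sprintf("key%d", i), "value")
    }
    c.Trim(80) // drop back to 8 entries so the next writes don't evict
    fmt.Println("entries after trim:", len(c.order))
}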
@@ -18,11 +18,15 @@
 package cache
 
 import (
+    "time"
+
     "codeberg.org/gruf/go-cache/v3/result"
+    "codeberg.org/gruf/go-cache/v3/simple"
     "codeberg.org/gruf/go-cache/v3/ttl"
     "github.com/superseriousbusiness/gotosocial/internal/cache/domain"
     "github.com/superseriousbusiness/gotosocial/internal/config"
     "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+    "github.com/superseriousbusiness/gotosocial/internal/log"
 )
 
 type GTSCaches struct {
@@ -52,7 +56,7 @@
     user *result.Cache[*gtsmodel.User]
 
     // TODO: move out of GTS caches since unrelated to DB.
-    webfinger *ttl.Cache[string, string]
+    webfinger *ttl.Cache[string, string] // TTL=24hr, sweep=5min
 }
 
 // Init will initialize all the gtsmodel caches in this collection.
@@ -87,98 +91,14 @@ func (c *GTSCaches) Init() {
 
 // Start will attempt to start all of the gtsmodel caches, or panic.
 func (c *GTSCaches) Start() {
-    tryStart(c.account, config.GetCacheGTSAccountSweepFreq())
-    tryStart(c.accountNote, config.GetCacheGTSAccountNoteSweepFreq())
-    tryStart(c.block, config.GetCacheGTSBlockSweepFreq())
-    tryUntil("starting block IDs cache", 5, func() bool {
-        if sweep := config.GetCacheGTSBlockIDsSweepFreq(); sweep > 0 {
-            return c.blockIDs.Start(sweep)
-        }
-        return true
-    })
-    tryStart(c.emoji, config.GetCacheGTSEmojiSweepFreq())
-    tryStart(c.emojiCategory, config.GetCacheGTSEmojiCategorySweepFreq())
-    tryStart(c.follow, config.GetCacheGTSFollowSweepFreq())
-    tryUntil("starting follow IDs cache", 5, func() bool {
-        if sweep := config.GetCacheGTSFollowIDsSweepFreq(); sweep > 0 {
-            return c.followIDs.Start(sweep)
-        }
-        return true
-    })
-    tryStart(c.followRequest, config.GetCacheGTSFollowRequestSweepFreq())
-    tryUntil("starting follow request IDs cache", 5, func() bool {
-        if sweep := config.GetCacheGTSFollowRequestIDsSweepFreq(); sweep > 0 {
-            return c.followRequestIDs.Start(sweep)
-        }
-        return true
-    })
-    tryStart(c.instance, config.GetCacheGTSInstanceSweepFreq())
-    tryStart(c.list, config.GetCacheGTSListSweepFreq())
-    tryStart(c.listEntry, config.GetCacheGTSListEntrySweepFreq())
-    tryStart(c.marker, config.GetCacheGTSMarkerSweepFreq())
-    tryStart(c.media, config.GetCacheGTSMediaSweepFreq())
-    tryStart(c.mention, config.GetCacheGTSMentionSweepFreq())
-    tryStart(c.notification, config.GetCacheGTSNotificationSweepFreq())
-    tryStart(c.report, config.GetCacheGTSReportSweepFreq())
-    tryStart(c.status, config.GetCacheGTSStatusSweepFreq())
-    tryStart(c.statusFave, config.GetCacheGTSStatusFaveSweepFreq())
-    tryStart(c.tag, config.GetCacheGTSTagSweepFreq())
-    tryStart(c.tombstone, config.GetCacheGTSTombstoneSweepFreq())
-    tryStart(c.user, config.GetCacheGTSUserSweepFreq())
     tryUntil("starting *gtsmodel.Webfinger cache", 5, func() bool {
-        if sweep := config.GetCacheGTSWebfingerSweepFreq(); sweep > 0 {
-            return c.webfinger.Start(sweep)
-        }
-        return true
+        return c.webfinger.Start(5 * time.Minute)
     })
 }
 
 // Stop will attempt to stop all of the gtsmodel caches, or panic.
 func (c *GTSCaches) Stop() {
-    tryStop(c.account, config.GetCacheGTSAccountSweepFreq())
-    tryStop(c.accountNote, config.GetCacheGTSAccountNoteSweepFreq())
-    tryStop(c.block, config.GetCacheGTSBlockSweepFreq())
-    tryUntil("stopping block IDs cache", 5, func() bool {
-        if config.GetCacheGTSBlockIDsSweepFreq() > 0 {
-            return c.blockIDs.Stop()
-        }
-        return true
-    })
-    tryStop(c.emoji, config.GetCacheGTSEmojiSweepFreq())
-    tryStop(c.emojiCategory, config.GetCacheGTSEmojiCategorySweepFreq())
-    tryStop(c.follow, config.GetCacheGTSFollowSweepFreq())
-    tryUntil("stopping follow IDs cache", 5, func() bool {
-        if config.GetCacheGTSFollowIDsSweepFreq() > 0 {
-            return c.followIDs.Stop()
-        }
-        return true
-    })
-    tryStop(c.followRequest, config.GetCacheGTSFollowRequestSweepFreq())
-    tryUntil("stopping follow request IDs cache", 5, func() bool {
-        if config.GetCacheGTSFollowRequestIDsSweepFreq() > 0 {
-            return c.followRequestIDs.Stop()
-        }
-        return true
-    })
-    tryStop(c.instance, config.GetCacheGTSInstanceSweepFreq())
-    tryStop(c.list, config.GetCacheGTSListSweepFreq())
-    tryStop(c.listEntry, config.GetCacheGTSListEntrySweepFreq())
-    tryStop(c.marker, config.GetCacheGTSMarkerSweepFreq())
-    tryStop(c.media, config.GetCacheGTSMediaSweepFreq())
-    tryStop(c.mention, config.GetCacheGTSNotificationSweepFreq())
-    tryStop(c.notification, config.GetCacheGTSNotificationSweepFreq())
-    tryStop(c.report, config.GetCacheGTSReportSweepFreq())
-    tryStop(c.status, config.GetCacheGTSStatusSweepFreq())
-    tryStop(c.statusFave, config.GetCacheGTSStatusFaveSweepFreq())
-    tryStop(c.tag, config.GetCacheGTSTagSweepFreq())
-    tryStop(c.tombstone, config.GetCacheGTSTombstoneSweepFreq())
-    tryStop(c.user, config.GetCacheGTSUserSweepFreq())
-    tryUntil("stopping *gtsmodel.Webfinger cache", 5, func() bool {
-        if config.GetCacheGTSWebfingerSweepFreq() > 0 {
-            return c.webfinger.Stop()
-        }
-        return true
-    })
+    tryUntil("stopping *gtsmodel.Webfinger cache", 5, c.webfinger.Stop)
 }
 
 // Account provides access to the gtsmodel Account database cache.
@@ -315,6 +235,14 @@ func (c *GTSCaches) Webfinger() *ttl.Cache[string, string] {
 }
 
 func (c *GTSCaches) initAccount() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofAccount(), // model in-mem size.
+        config.GetCacheAccountMemRatio(),
+    )
+
+    log.Infof(nil, "Account cache size = %d", cap)
+
     c.account = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -329,12 +257,19 @@
         a2 := new(gtsmodel.Account)
         *a2 = *a1
         return a2
-    }, config.GetCacheGTSAccountMaxSize())
-    c.account.SetTTL(config.GetCacheGTSAccountTTL(), true)
+    }, cap)
     c.account.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initAccountNote() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofAccountNote(), // model in-mem size.
+        config.GetCacheAccountNoteMemRatio(),
+    )
+
+    log.Infof(nil, "AccountNote cache size = %d", cap)
+
     c.accountNote = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "AccountID.TargetAccountID"},
@@ -342,12 +277,20 @@
         n2 := new(gtsmodel.AccountNote)
         *n2 = *n1
         return n2
-    }, config.GetCacheGTSAccountNoteMaxSize())
-    c.accountNote.SetTTL(config.GetCacheGTSAccountNoteTTL(), true)
+    }, cap)
     c.accountNote.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initBlock() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofBlock(), // model in-mem size.
+        config.GetCacheBlockMemRatio(),
+    )
+
+    log.Infof(nil, "Block cache size = %d", cap)
+
     c.block = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -358,16 +301,22 @@
         b2 := new(gtsmodel.Block)
         *b2 = *b1
         return b2
-    }, config.GetCacheGTSBlockMaxSize())
-    c.block.SetTTL(config.GetCacheGTSBlockTTL(), true)
+    }, cap)
     c.block.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initBlockIDs() {
-    c.blockIDs = &SliceCache[string]{Cache: ttl.New[string, []string](
+    // Calculate maximum cache size.
+    cap := calculateSliceCacheMax(
+        config.GetCacheBlockIDsMemRatio(),
+    )
+
+    log.Infof(nil, "Block IDs cache size = %d", cap)
+
+    c.blockIDs = &SliceCache[string]{Cache: simple.New[string, []string](
         0,
-        config.GetCacheGTSBlockIDsMaxSize(),
-        config.GetCacheGTSBlockIDsTTL(),
+        cap,
     )}
 }
 
@@ -376,6 +325,14 @@
 }
 
 func (c *GTSCaches) initEmoji() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofEmoji(), // model in-mem size.
+        config.GetCacheEmojiMemRatio(),
+    )
+
+    log.Infof(nil, "Emoji cache size = %d", cap)
+
     c.emoji = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -386,12 +343,20 @@
         e2 := new(gtsmodel.Emoji)
         *e2 = *e1
         return e2
-    }, config.GetCacheGTSEmojiMaxSize())
-    c.emoji.SetTTL(config.GetCacheGTSEmojiTTL(), true)
+    }, cap)
     c.emoji.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initEmojiCategory() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofEmojiCategory(), // model in-mem size.
+        config.GetCacheEmojiCategoryMemRatio(),
+    )
+
+    log.Infof(nil, "EmojiCategory cache size = %d", cap)
+
     c.emojiCategory = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "Name"},
@@ -399,12 +364,20 @@
         c2 := new(gtsmodel.EmojiCategory)
         *c2 = *c1
         return c2
-    }, config.GetCacheGTSEmojiCategoryMaxSize())
-    c.emojiCategory.SetTTL(config.GetCacheGTSEmojiCategoryTTL(), true)
+    }, cap)
     c.emojiCategory.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initFollow() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofFollow(), // model in-mem size.
+        config.GetCacheFollowMemRatio(),
+    )
+
+    log.Infof(nil, "Follow cache size = %d", cap)
+
     c.follow = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -415,19 +388,34 @@
         f2 := new(gtsmodel.Follow)
         *f2 = *f1
         return f2
-    }, config.GetCacheGTSFollowMaxSize())
-    c.follow.SetTTL(config.GetCacheGTSFollowTTL(), true)
+    }, cap)
+    c.follow.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initFollowIDs() {
-    c.followIDs = &SliceCache[string]{Cache: ttl.New[string, []string](
+    // Calculate maximum cache size.
+    cap := calculateSliceCacheMax(
+        config.GetCacheFollowIDsMemRatio(),
+    )
+
+    log.Infof(nil, "Follow IDs cache size = %d", cap)
+
+    c.followIDs = &SliceCache[string]{Cache: simple.New[string, []string](
        0,
-        config.GetCacheGTSFollowIDsMaxSize(),
-        config.GetCacheGTSFollowIDsTTL(),
+        cap,
    )}
 }
 
 func (c *GTSCaches) initFollowRequest() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofFollowRequest(), // model in-mem size.
+        config.GetCacheFollowRequestMemRatio(),
+    )
+
+    log.Infof(nil, "FollowRequest cache size = %d", cap)
+
     c.followRequest = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -438,19 +426,34 @@
         f2 := new(gtsmodel.FollowRequest)
         *f2 = *f1
         return f2
-    }, config.GetCacheGTSFollowRequestMaxSize())
-    c.followRequest.SetTTL(config.GetCacheGTSFollowRequestTTL(), true)
+    }, cap)
+    c.followRequest.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initFollowRequestIDs() {
-    c.followRequestIDs = &SliceCache[string]{Cache: ttl.New[string, []string](
+    // Calculate maximum cache size.
+    cap := calculateSliceCacheMax(
+        config.GetCacheFollowRequestIDsMemRatio(),
+    )
+
+    log.Infof(nil, "Follow Request IDs cache size = %d", cap)
+
+    c.followRequestIDs = &SliceCache[string]{Cache: simple.New[string, []string](
        0,
-        config.GetCacheGTSFollowRequestIDsMaxSize(),
-        config.GetCacheGTSFollowRequestIDsTTL(),
+        cap,
    )}
 }
 
 func (c *GTSCaches) initInstance() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofInstance(), // model in-mem size.
+        config.GetCacheInstanceMemRatio(),
+    )
+
+    log.Infof(nil, "Instance cache size = %d", cap)
+
     c.instance = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "Domain"},
@@ -458,24 +461,40 @@
         i2 := new(gtsmodel.Instance)
         *i2 = *i1
         return i1
-    }, config.GetCacheGTSInstanceMaxSize())
-    c.instance.SetTTL(config.GetCacheGTSInstanceTTL(), true)
-    c.emojiCategory.IgnoreErrors(ignoreErrors)
+    }, cap)
+    c.instance.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initList() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofList(), // model in-mem size.
+        config.GetCacheListMemRatio(),
+    )
+
+    log.Infof(nil, "List cache size = %d", cap)
+
     c.list = result.New([]result.Lookup{
         {Name: "ID"},
     }, func(l1 *gtsmodel.List) *gtsmodel.List {
         l2 := new(gtsmodel.List)
         *l2 = *l1
         return l2
-    }, config.GetCacheGTSListMaxSize())
-    c.list.SetTTL(config.GetCacheGTSListTTL(), true)
+    }, cap)
     c.list.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initListEntry() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofListEntry(), // model in-mem size.
+        config.GetCacheListEntryMemRatio(),
+    )
+
+    log.Infof(nil, "ListEntry cache size = %d", cap)
+
     c.listEntry = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "ListID", Multi: true},
@@ -484,48 +503,80 @@
         l2 := new(gtsmodel.ListEntry)
         *l2 = *l1
         return l2
-    }, config.GetCacheGTSListEntryMaxSize())
-    c.list.SetTTL(config.GetCacheGTSListEntryTTL(), true)
-    c.list.IgnoreErrors(ignoreErrors)
+    }, cap)
+    c.listEntry.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initMarker() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofMarker(), // model in-mem size.
+        config.GetCacheMarkerMemRatio(),
+    )
+
+    log.Infof(nil, "Marker cache size = %d", cap)
+
     c.marker = result.New([]result.Lookup{
         {Name: "AccountID.Name"},
     }, func(m1 *gtsmodel.Marker) *gtsmodel.Marker {
         m2 := new(gtsmodel.Marker)
         *m2 = *m1
         return m2
-    }, config.GetCacheGTSMarkerMaxSize())
-    c.marker.SetTTL(config.GetCacheGTSMarkerTTL(), true)
+    }, cap)
     c.marker.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initMedia() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofMedia(), // model in-mem size.
+        config.GetCacheMediaMemRatio(),
+    )
+
+    log.Infof(nil, "Media cache size = %d", cap)
+
     c.media = result.New([]result.Lookup{
         {Name: "ID"},
     }, func(m1 *gtsmodel.MediaAttachment) *gtsmodel.MediaAttachment {
         m2 := new(gtsmodel.MediaAttachment)
         *m2 = *m1
         return m2
-    }, config.GetCacheGTSMediaMaxSize())
-    c.media.SetTTL(config.GetCacheGTSMediaTTL(), true)
+    }, cap)
     c.media.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initMention() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofMention(), // model in-mem size.
+        config.GetCacheMentionMemRatio(),
+    )
+
+    log.Infof(nil, "Mention cache size = %d", cap)
+
     c.mention = result.New([]result.Lookup{
         {Name: "ID"},
     }, func(m1 *gtsmodel.Mention) *gtsmodel.Mention {
         m2 := new(gtsmodel.Mention)
         *m2 = *m1
         return m2
-    }, config.GetCacheGTSMentionMaxSize())
-    c.mention.SetTTL(config.GetCacheGTSMentionTTL(), true)
+    }, cap)
     c.mention.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initNotification() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofNotification(), // model in-mem size.
+        config.GetCacheNotificationMemRatio(),
+    )
+
+    log.Infof(nil, "Notification cache size = %d", cap)
+
     c.notification = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "NotificationType.TargetAccountID.OriginAccountID.StatusID"},
@@ -533,24 +584,40 @@
         n2 := new(gtsmodel.Notification)
         *n2 = *n1
         return n2
-    }, config.GetCacheGTSNotificationMaxSize())
-    c.notification.SetTTL(config.GetCacheGTSNotificationTTL(), true)
+    }, cap)
     c.notification.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initReport() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofReport(), // model in-mem size.
+        config.GetCacheReportMemRatio(),
+    )
+
+    log.Infof(nil, "Report cache size = %d", cap)
+
     c.report = result.New([]result.Lookup{
         {Name: "ID"},
     }, func(r1 *gtsmodel.Report) *gtsmodel.Report {
         r2 := new(gtsmodel.Report)
         *r2 = *r1
         return r2
-    }, config.GetCacheGTSReportMaxSize())
-    c.report.SetTTL(config.GetCacheGTSReportTTL(), true)
+    }, cap)
     c.report.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initStatus() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofStatus(), // model in-mem size.
+        config.GetCacheStatusMemRatio(),
+    )
+
+    log.Infof(nil, "Status cache size = %d", cap)
+
     c.status = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -559,12 +626,20 @@
         s2 := new(gtsmodel.Status)
         *s2 = *s1
         return s2
-    }, config.GetCacheGTSStatusMaxSize())
-    c.status.SetTTL(config.GetCacheGTSStatusTTL(), true)
+    }, cap)
     c.status.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initStatusFave() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofStatusFave(), // model in-mem size.
+        config.GetCacheStatusFaveMemRatio(),
+    )
+
+    log.Infof(nil, "StatusFave cache size = %d", cap)
+
     c.statusFave = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "AccountID.StatusID"},
@@ -572,12 +647,20 @@
         f2 := new(gtsmodel.StatusFave)
         *f2 = *f1
         return f2
-    }, config.GetCacheGTSStatusFaveMaxSize())
-    c.status.SetTTL(config.GetCacheGTSStatusFaveTTL(), true)
-    c.status.IgnoreErrors(ignoreErrors)
+    }, cap)
+    c.statusFave.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initTag() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofTag(), // model in-mem size.
+        config.GetCacheTagMemRatio(),
+    )
+
+    log.Infof(nil, "Tag cache size = %d", cap)
+
     c.tag = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "Name"},
@@ -585,12 +668,20 @@
         m2 := new(gtsmodel.Tag)
         *m2 = *m1
         return m2
-    }, config.GetCacheGTSTagMaxSize())
-    c.tag.SetTTL(config.GetCacheGTSTagTTL(), true)
+    }, cap)
     c.tag.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initTombstone() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofTombstone(), // model in-mem size.
+        config.GetCacheTombstoneMemRatio(),
+    )
+
+    log.Infof(nil, "Tombstone cache size = %d", cap)
+
     c.tombstone = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "URI"},
@@ -598,12 +689,20 @@
         t2 := new(gtsmodel.Tombstone)
         *t2 = *t1
         return t2
-    }, config.GetCacheGTSTombstoneMaxSize())
-    c.tombstone.SetTTL(config.GetCacheGTSTombstoneTTL(), true)
+    }, cap)
     c.tombstone.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initUser() {
+    // Calculate maximum cache size.
+    cap := calculateResultCacheMax(
+        sizeofUser(), // model in-mem size.
+        config.GetCacheUserMemRatio(),
+    )
+
+    log.Infof(nil, "User cache size = %d", cap)
+
     c.user = result.New([]result.Lookup{
         {Name: "ID"},
         {Name: "AccountID"},
@@ -614,15 +713,23 @@
         u2 := new(gtsmodel.User)
         *u2 = *u1
         return u2
-    }, config.GetCacheGTSUserMaxSize())
-    c.user.SetTTL(config.GetCacheGTSUserTTL(), true)
+    }, cap)
     c.user.IgnoreErrors(ignoreErrors)
 }
 
 func (c *GTSCaches) initWebfinger() {
+    // Calculate maximum cache size.
+    cap := calculateCacheMax(
+        sizeofURIStr, sizeofURIStr,
+        config.GetCacheWebfingerMemRatio(),
+    )
+
+    log.Infof(nil, "Webfinger cache size = %d", cap)
+
     c.webfinger = ttl.New[string, string](
         0,
-        config.GetCacheGTSWebfingerMaxSize(),
-        config.GetCacheGTSWebfingerTTL(),
+        cap,
+        24*time.Hour,
     )
 }
@@ -0,0 +1,501 @@
+// GoToSocial
+// Copyright (C) GoToSocial Authors admin@gotosocial.org
+// SPDX-License-Identifier: AGPL-3.0-or-later
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+package cache
+
+import (
+    "crypto/rsa"
+    "time"
+    "unsafe"
+
+    "codeberg.org/gruf/go-cache/v3/simple"
+    "github.com/DmitriyVTitov/size"
+    "github.com/superseriousbusiness/gotosocial/internal/ap"
+    "github.com/superseriousbusiness/gotosocial/internal/config"
+    "github.com/superseriousbusiness/gotosocial/internal/gtsmodel"
+    "github.com/superseriousbusiness/gotosocial/internal/id"
+)
+
+const (
+    // example data values.
+    exampleID   = id.Highest
+    exampleURI  = "https://social.bbc/users/ItsMePrinceCharlesInit"
+    exampleText = `
+oh no me nan's gone and done it :shocked:
+
+she fuckin killed the king :regicide:
+
+nan what have you done :shocked:
+
+no nan put down the knife, don't go after the landlords next! :knife:
+
+you'll make society more equitable for all if you're not careful! :hammer_sickle:
+
+#JustNanProblems #WhatWillSheDoNext #MaybeItWasntSuchABadThingAfterAll
+`
+
+    exampleTextSmall = "Small problem lads, me nan's gone on a bit of a rampage"
+    exampleUsername  = "@SexHaver1969"
+
+    // ID string size in memory (is always 26 char ULID).
+    sizeofIDStr = unsafe.Sizeof(exampleID)
+
+    // URI string size in memory (use some random example URI).
+    sizeofURIStr = unsafe.Sizeof(exampleURI)
+
+    // ID slice size in memory (using some estimate of length = 250).
+    sizeofIDSlice = unsafe.Sizeof([]string{}) + 250*sizeofIDStr
+
+    // result cache key size estimate which is tricky. it can
+    // be a serialized string of almost any type, so we pick a
+    // nice serialized key size on the upper end of normal.
+    sizeofResultKey = 2 * sizeofIDStr
+)
+
+// calculateSliceCacheMax calculates the maximum capacity for a slice cache with given individual ratio.
+func calculateSliceCacheMax(ratio float64) int {
+    return calculateCacheMax(sizeofIDStr, sizeofIDSlice, ratio)
+}
+
+// calculateResultCacheMax calculates the maximum cache capacity for a result
+// cache's individual ratio number, and the size of the struct model in memory.
+func calculateResultCacheMax(structSz uintptr, ratio float64) int {
+    // Estimate a worse-case scenario of extra lookup hash maps,
+    // where lookups are the no. "keys" each result can be found under
+    const lookups = 10
+
+    // Calculate the extra cache lookup map overheads.
+    totalLookupKeySz := uintptr(lookups) * sizeofResultKey
+    totalLookupValSz := uintptr(lookups) * unsafe.Sizeof(uint64(0))
+
+    // Primary cache sizes.
+    pkeySz := unsafe.Sizeof(uint64(0))
+    pvalSz := structSz
+
+    // The result cache wraps each struct result in a wrapping
+    // struct with further information, and possible error. This
+    // also needs to be taken into account when calculating value.
+    const resultValueOverhead = unsafe.Sizeof(&struct {
+        _ int64
+        _ []any
+        _ any
+        _ error
+    }{})
+
+    return calculateCacheMax(
+        pkeySz+totalLookupKeySz,
+        pvalSz+totalLookupValSz+resultValueOverhead,
+        ratio,
+    )
+}
+
+// calculateCacheMax calculates the maximum cache capacity for a cache's
+// individual ratio number, and key + value object sizes in memory.
+func calculateCacheMax(keySz, valSz uintptr, ratio float64) int {
+    if ratio < 0 {
+        // Negative ratios are a secret little trick
+        // to manually set the cache capacity sizes.
+        return int(-1 * ratio)
+    }
+
+    // see: https://golang.org/src/runtime/map.go
+    const emptyBucketOverhead = 10.79
+
+    // This takes into account (roughly) that the underlying simple cache library wraps
+    // elements within a simple.Entry{}, and the ordered map wraps each in a linked list elem.
+    const cacheElemOverhead = unsafe.Sizeof(simple.Entry{}) + unsafe.Sizeof(struct {
+        key, value interface{}
+        next, prev uintptr
+    }{})
+
+    // The inputted memory ratio does not take into account the
+    // total of all ratios, so divide it here to get perc. ratio.
+    totalRatio := ratio / totalOfRatios()
+
+    // TODO: we should also further weight this ratio depending
+    // on the combined keySz + valSz as a ratio of all available
+    // cache model memories. otherwise you can end up with a
+    // low-ratio cache of tiny models with larger capacity than
+    // a high-ratio cache of large models.
+
+    // Get max available cache memory, calculating max for
+    // this cache by multiplying by this cache's mem ratio.
+    maxMem := config.GetCacheMemoryTarget()
+    fMaxMem := float64(maxMem) * totalRatio
+
+    // Cast to useable types.
+    fKeySz := float64(keySz)
+    fValSz := float64(valSz)
+
+    // Calculated using the internal cache map size:
+    // (($keysz + $valsz) * $len) + ($len * $allOverheads) = $memSz
+    return int(fMaxMem / (fKeySz + fValSz + emptyBucketOverhead + float64(cacheElemOverhead)))
+}
+
+// totalOfRatios returns the total of all cache ratios added together.
+func totalOfRatios() float64 {
+    // NOTE: this is not performant calculating
+    // this every damn time (mainly the mutex unlocks
+    // required to access each config var). fortunately
+    // we only do this on init so fuck it :D
+    return 0 +
+        config.GetCacheAccountMemRatio() +
+        config.GetCacheAccountNoteMemRatio() +
+        config.GetCacheBlockMemRatio() +
+        config.GetCacheBlockIDsMemRatio() +
+        config.GetCacheEmojiMemRatio() +
+        config.GetCacheEmojiCategoryMemRatio() +
+        config.GetCacheFollowMemRatio() +
+        config.GetCacheFollowIDsMemRatio() +
+        config.GetCacheFollowRequestMemRatio() +
+        config.GetCacheFollowRequestIDsMemRatio() +
+        config.GetCacheInstanceMemRatio() +
+        config.GetCacheListMemRatio() +
+        config.GetCacheListEntryMemRatio() +
+        config.GetCacheMarkerMemRatio() +
+        config.GetCacheMediaMemRatio() +
+        config.GetCacheMentionMemRatio() +
+        config.GetCacheNotificationMemRatio() +
+        config.GetCacheReportMemRatio() +
+        config.GetCacheStatusMemRatio() +
+        config.GetCacheStatusFaveMemRatio() +
+        config.GetCacheTagMemRatio() +
+        config.GetCacheTombstoneMemRatio() +
+        config.GetCacheUserMemRatio() +
+        config.GetCacheWebfingerMemRatio() +
+        config.GetCacheVisibilityMemRatio()
+}
+
+func sizeofAccount() uintptr {
+    return uintptr(size.Of(&gtsmodel.Account{
+        ID:                      exampleID,
+        Username:                exampleUsername,
+        AvatarMediaAttachmentID: exampleID,
+        HeaderMediaAttachmentID: exampleID,
+        DisplayName:             exampleUsername,
+        Note:                    exampleText,
+        NoteRaw:                 exampleText,
+        Memorial:                func() *bool { ok := false; return &ok }(),
+        CreatedAt:               time.Now(),
+        UpdatedAt:               time.Now(),
+        FetchedAt:               time.Now(),
+        Bot:                     func() *bool { ok := true; return &ok }(),
+        Locked:                  func() *bool { ok := true; return &ok }(),
+        Discoverable:            func() *bool { ok := false; return &ok }(),
+        Privacy:                 gtsmodel.VisibilityFollowersOnly,
+        Sensitive:               func() *bool { ok := true; return &ok }(),
+        Language:                "fr",
+        URI:                     exampleURI,
+        URL:                     exampleURI,
+        InboxURI:                exampleURI,
+        OutboxURI:               exampleURI,
+        FollowersURI:            exampleURI,
+        FollowingURI:            exampleURI,
+        FeaturedCollectionURI:   exampleURI,
+        ActorType:               ap.ActorPerson,
+        PrivateKey:              &rsa.PrivateKey{},
+        PublicKey:               &rsa.PublicKey{},
+        PublicKeyURI:            exampleURI,
+        SensitizedAt:            time.Time{},
+        SilencedAt:              time.Now(),
+        SuspendedAt:             time.Now(),
+        HideCollections:         func() *bool { ok := true; return &ok }(),
+        SuspensionOrigin:        "",
+        EnableRSS:               func() *bool { ok := true; return &ok }(),
+    }))
+}
+
+func sizeofAccountNote() uintptr {
+    return uintptr(size.Of(&gtsmodel.AccountNote{
+        ID:              exampleID,
+        AccountID:       exampleID,
+        TargetAccountID: exampleID,
+        Comment:         exampleTextSmall,
+    }))
+}
+
+func sizeofBlock() uintptr {
+    return uintptr(size.Of(&gtsmodel.Block{
+        ID:              exampleID,
+        CreatedAt:       time.Now(),
+        UpdatedAt:       time.Now(),
+        URI:             exampleURI,
+        AccountID:       exampleID,
+        TargetAccountID: exampleID,
+    }))
+}
+
+func sizeofEmoji() uintptr {
+    return uintptr(size.Of(&gtsmodel.Emoji{
+        ID:                     exampleID,
+        Shortcode:              exampleTextSmall,
+        Domain:                 exampleURI,
+        CreatedAt:              time.Now(),
+        UpdatedAt:              time.Now(),
+        ImageRemoteURL:         exampleURI,
+        ImageStaticRemoteURL:   exampleURI,
+        ImageURL:               exampleURI,
+        ImagePath:              exampleURI,
+        ImageStaticURL:         exampleURI,
+        ImageStaticPath:        exampleURI,
+        ImageContentType:       "image/png",
+        ImageStaticContentType: "image/png",
+        ImageUpdatedAt:         time.Now(),
+        Disabled:               func() *bool { ok := false; return &ok }(),
+        URI:                    "http://localhost:8080/emoji/01F8MH9H8E4VG3KDYJR9EGPXCQ",
+        VisibleInPicker:        func() *bool { ok := true; return &ok }(),
+        CategoryID:             "01GGQ8V4993XK67B2JB396YFB7",
+        Cached:                 func() *bool { ok := true; return &ok }(),
+    }))
+}
+
+func sizeofEmojiCategory() uintptr {
+    return uintptr(size.Of(&gtsmodel.EmojiCategory{
+        ID:        exampleID,
+        Name:      exampleUsername,
+        CreatedAt: time.Now(),
+        UpdatedAt: time.Now(),
+    }))
+}
+
+func sizeofFollow() uintptr {
+    return uintptr(size.Of(&gtsmodel.Follow{
+        ID:              exampleID,
+        CreatedAt:       time.Now(),
+        UpdatedAt:       time.Now(),
+        AccountID:       exampleID,
+        TargetAccountID: exampleID,
+        ShowReblogs:     func() *bool { ok := true; return &ok }(),
+        URI:             exampleURI,
+        Notify:          func() *bool { ok := false; return &ok }(),
+    }))
+}
+
+func sizeofFollowRequest() uintptr {
+    return uintptr(size.Of(&gtsmodel.FollowRequest{
+        ID:              exampleID,
+        CreatedAt:       time.Now(),
+        UpdatedAt:       time.Now(),
+        AccountID:       exampleID,
+        TargetAccountID: exampleID,
+        ShowReblogs:     func() *bool { ok := true; return &ok }(),
+        URI:             exampleURI,
+        Notify:          func() *bool { ok := false; return &ok }(),
+    }))
+}
+
+func sizeofInstance() uintptr {
+    return uintptr(size.Of(&gtsmodel.Instance{
+        ID:                     exampleID,
+        CreatedAt:              time.Now(),
+        UpdatedAt:              time.Now(),
+        Domain:                 exampleURI,
+        URI:                    exampleURI,
+        Title:                  exampleTextSmall,
+        ShortDescription:       exampleText,
+        Description:            exampleText,
+        ContactEmail:           exampleUsername,
+        ContactAccountUsername: exampleUsername,
+        ContactAccountID:       exampleID,
+    }))
+}
+
+func sizeofList() uintptr {
+    return uintptr(size.Of(&gtsmodel.List{
+        ID:            exampleID,
+        CreatedAt:     time.Now(),
+        UpdatedAt:     time.Now(),
+        Title:         exampleTextSmall,
+        AccountID:     exampleID,
+        RepliesPolicy: gtsmodel.RepliesPolicyFollowed,
+    }))
+}
+
+func sizeofListEntry() uintptr {
+    return uintptr(size.Of(&gtsmodel.ListEntry{
+        ID:        exampleID,
+        CreatedAt: time.Now(),
+        UpdatedAt: time.Now(),
+        ListID:    exampleID,
+        FollowID:  exampleID,
+    }))
+}
+
+func sizeofMarker() uintptr {
+    return uintptr(size.Of(&gtsmodel.Marker{
+        AccountID:  exampleID,
+        Name:       gtsmodel.MarkerNameHome,
+        UpdatedAt:  time.Now(),
+        Version:    0,
+        LastReadID: exampleID,
+    }))
+}
+
+func sizeofMedia() uintptr {
+    return uintptr(size.Of(&gtsmodel.MediaAttachment{
+        ID:       exampleID,
+        StatusID: exampleID,
+        URL:      exampleURI,
|
||||||
|
RemoteURL: exampleURI,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
Type: gtsmodel.FileTypeImage,
|
||||||
|
AccountID: exampleID,
|
||||||
|
Description: exampleText,
|
||||||
|
ScheduledStatusID: exampleID,
|
||||||
|
Blurhash: exampleTextSmall,
|
||||||
|
File: gtsmodel.File{
|
||||||
|
Path: exampleURI,
|
||||||
|
ContentType: "image/jpeg",
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
},
|
||||||
|
Thumbnail: gtsmodel.Thumbnail{
|
||||||
|
Path: exampleURI,
|
||||||
|
ContentType: "image/jpeg",
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
URL: exampleURI,
|
||||||
|
RemoteURL: exampleURI,
|
||||||
|
},
|
||||||
|
Avatar: func() *bool { ok := false; return &ok }(),
|
||||||
|
Header: func() *bool { ok := false; return &ok }(),
|
||||||
|
Cached: func() *bool { ok := true; return &ok }(),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofMention() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.Mention{
|
||||||
|
ID: exampleURI,
|
||||||
|
StatusID: exampleURI,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
OriginAccountID: exampleURI,
|
||||||
|
OriginAccountURI: exampleURI,
|
||||||
|
TargetAccountID: exampleID,
|
||||||
|
NameString: exampleUsername,
|
||||||
|
TargetAccountURI: exampleURI,
|
||||||
|
TargetAccountURL: exampleURI,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofNotification() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.Notification{
|
||||||
|
ID: exampleID,
|
||||||
|
NotificationType: gtsmodel.NotificationFave,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
TargetAccountID: exampleID,
|
||||||
|
OriginAccountID: exampleID,
|
||||||
|
StatusID: exampleID,
|
||||||
|
Read: func() *bool { ok := false; return &ok }(),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofReport() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.Report{
|
||||||
|
ID: exampleID,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
URI: exampleURI,
|
||||||
|
AccountID: exampleID,
|
||||||
|
TargetAccountID: exampleID,
|
||||||
|
Comment: exampleText,
|
||||||
|
StatusIDs: []string{exampleID, exampleID, exampleID},
|
||||||
|
Forwarded: func() *bool { ok := true; return &ok }(),
|
||||||
|
ActionTaken: exampleText,
|
||||||
|
ActionTakenAt: time.Now(),
|
||||||
|
ActionTakenByAccountID: exampleID,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofStatus() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.Status{
|
||||||
|
ID: exampleURI,
|
||||||
|
URI: exampleURI,
|
||||||
|
URL: exampleURI,
|
||||||
|
Content: exampleText,
|
||||||
|
Text: exampleText,
|
||||||
|
AttachmentIDs: []string{exampleID, exampleID, exampleID},
|
||||||
|
TagIDs: []string{exampleID, exampleID, exampleID},
|
||||||
|
MentionIDs: []string{},
|
||||||
|
EmojiIDs: []string{exampleID, exampleID, exampleID},
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
FetchedAt: time.Now(),
|
||||||
|
Local: func() *bool { ok := false; return &ok }(),
|
||||||
|
AccountURI: exampleURI,
|
||||||
|
AccountID: exampleID,
|
||||||
|
InReplyToID: exampleID,
|
||||||
|
InReplyToURI: exampleURI,
|
||||||
|
InReplyToAccountID: exampleID,
|
||||||
|
BoostOfID: exampleID,
|
||||||
|
BoostOfAccountID: exampleID,
|
||||||
|
ContentWarning: exampleUsername, // similar length
|
||||||
|
Visibility: gtsmodel.VisibilityPublic,
|
||||||
|
Sensitive: func() *bool { ok := false; return &ok }(),
|
||||||
|
Language: "en",
|
||||||
|
CreatedWithApplicationID: exampleID,
|
||||||
|
Federated: func() *bool { ok := true; return &ok }(),
|
||||||
|
Boostable: func() *bool { ok := true; return &ok }(),
|
||||||
|
Replyable: func() *bool { ok := true; return &ok }(),
|
||||||
|
Likeable: func() *bool { ok := true; return &ok }(),
|
||||||
|
ActivityStreamsType: ap.ObjectNote,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofStatusFave() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.StatusFave{
|
||||||
|
ID: exampleID,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
AccountID: exampleID,
|
||||||
|
TargetAccountID: exampleID,
|
||||||
|
StatusID: exampleID,
|
||||||
|
URI: exampleURI,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofTag() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.Tag{
|
||||||
|
ID: exampleID,
|
||||||
|
Name: exampleUsername,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
Useable: func() *bool { ok := true; return &ok }(),
|
||||||
|
Listable: func() *bool { ok := true; return &ok }(),
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofTombstone() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.Tombstone{
|
||||||
|
ID: exampleID,
|
||||||
|
CreatedAt: time.Now(),
|
||||||
|
UpdatedAt: time.Now(),
|
||||||
|
Domain: exampleUsername,
|
||||||
|
URI: exampleURI,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofVisibility() uintptr {
|
||||||
|
return uintptr(size.Of(&CachedVisibility{
|
||||||
|
ItemID: exampleID,
|
||||||
|
RequesterID: exampleID,
|
||||||
|
Type: VisibilityTypeAccount,
|
||||||
|
Value: false,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
func sizeofUser() uintptr {
|
||||||
|
return uintptr(size.Of(>smodel.User{}))
|
||||||
|
}
|
|
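The per-model size estimates above exist so that cache capacities can be derived from a single memory target and a set of ratios, instead of being configured one by one. The sketch below illustrates the general shape of that calculation only; the helper name, the per-entry overhead constant, and the example numbers are illustrative assumptions, not the exact helpers added by this commit:

package main

import "fmt"

// exampleCacheCap is a hypothetical helper (not this commit's code) showing how a
// memory target plus per-type ratios could yield a cache capacity: take this
// type's share of the target, then divide by the estimated in-memory size of one
// cached model plus an assumed fixed per-entry overhead.
func exampleCacheCap(targetBytes uint64, ratio, ratioTotal float64, modelSize uintptr) int {
    const entryOverhead = 64 // assumed bookkeeping bytes per entry (illustrative).
    share := float64(targetBytes) * (ratio / ratioTotal)
    capacity := int(share / float64(modelSize+entryOverhead))
    if capacity < 1 {
        capacity = 1
    }
    return capacity
}

func main() {
    // e.g. a 200MiB target, a ratio of 2 out of a ratio total of 100,
    // and a model estimated at 2KiB in memory.
    fmt.Println(exampleCacheCap(200*1024*1024, 2, 100, 2048))
}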
@@ -18,14 +18,14 @@
 package cache
 
 import (
-	"codeberg.org/gruf/go-cache/v3/ttl"
+	"codeberg.org/gruf/go-cache/v3/simple"
 	"golang.org/x/exp/slices"
 )
 
 // SliceCache wraps a ttl.Cache to provide simple loader-callback
 // functions for fetching + caching slices of objects (e.g. IDs).
 type SliceCache[T any] struct {
-	*ttl.Cache[string, []T]
+	*simple.Cache[string, []T]
 }
 
 // Load will attempt to load an existing slice from the cache for the given key, else calling the provided load function and caching the result.
@@ -20,10 +20,8 @@ package cache
 import (
 	"database/sql"
 	"errors"
-	"fmt"
 	"time"
 
-	"codeberg.org/gruf/go-cache/v3/result"
 	errorsv2 "codeberg.org/gruf/go-errors/v2"
 	"github.com/superseriousbusiness/gotosocial/internal/db"
 	"github.com/superseriousbusiness/gotosocial/internal/log"
@@ -56,26 +54,6 @@ func (*nocopy) Lock() {}
 
 func (*nocopy) Unlock() {}
 
-// tryStart will attempt to start the given cache only if sweep duration > 0 (sweeping is enabled).
-func tryStart[ValueType any](cache *result.Cache[ValueType], sweep time.Duration) {
-	if sweep > 0 {
-		var z ValueType
-		msg := fmt.Sprintf("starting %T cache", z)
-		tryUntil(msg, 5, func() bool {
-			return cache.Start(sweep)
-		})
-	}
-}
-
-// tryStop will attempt to stop the given cache only if sweep duration > 0 (sweeping is enabled).
-func tryStop[ValueType any](cache *result.Cache[ValueType], sweep time.Duration) {
-	if sweep > 0 {
-		var z ValueType
-		msg := fmt.Sprintf("stopping %T cache", z)
-		tryUntil(msg, 5, cache.Stop)
-	}
-}
-
 // tryUntil will attempt to call 'do' for 'count' attempts, before panicking with 'msg'.
 func tryUntil(msg string, count int, do func() bool) {
 	for i := 0; i < count; i++ {
@@ -20,6 +20,7 @@ package cache
 import (
 	"codeberg.org/gruf/go-cache/v3/result"
 	"github.com/superseriousbusiness/gotosocial/internal/config"
+	"github.com/superseriousbusiness/gotosocial/internal/log"
 )
 
 type VisibilityCache struct {
@@ -29,6 +30,14 @@ type VisibilityCache struct {
 // Init will initialize the visibility cache in this collection.
 // NOTE: the cache MUST NOT be in use anywhere, this is not thread-safe.
 func (c *VisibilityCache) Init() {
+	// Calculate maximum cache size.
+	cap := calculateResultCacheMax(
+		sizeofVisibility(), // model in-mem size.
+		config.GetCacheVisibilityMemRatio(),
+	)
+
+	log.Infof(nil, "Visibility cache size = %d", cap)
+
 	c.Cache = result.New([]result.Lookup{
 		{Name: "ItemID", Multi: true},
 		{Name: "RequesterID", Multi: true},
@@ -37,19 +46,17 @@ func (c *VisibilityCache) Init() {
 		v2 := new(CachedVisibility)
 		*v2 = *v1
 		return v2
-	}, config.GetCacheVisibilityMaxSize())
-	c.Cache.SetTTL(config.GetCacheVisibilityTTL(), true)
+	}, cap)
 	c.Cache.IgnoreErrors(ignoreErrors)
 }
 
 // Start will attempt to start the visibility cache, or panic.
 func (c *VisibilityCache) Start() {
-	tryStart(c.Cache, config.GetCacheVisibilitySweepFreq())
 }
 
 // Stop will attempt to stop the visibility cache, or panic.
 func (c *VisibilityCache) Stop() {
-	tryStop(c.Cache, config.GetCacheVisibilitySweepFreq())
 }
 
 // VisibilityType represents a visibility lookup type.
@@ -175,113 +175,32 @@ type HTTPClientConfiguration struct {
 }
 
 type CacheConfiguration struct {
-	GTS GTSCacheConfiguration `name:"gts"`
-
-	VisibilityMaxSize int `name:"visibility-max-size"`
-	VisibilityTTL time.Duration `name:"visibility-ttl"`
-	VisibilitySweepFreq time.Duration `name:"visibility-sweep-freq"`
-}
-
-type GTSCacheConfiguration struct {
-	AccountMaxSize int `name:"account-max-size"`
-	AccountTTL time.Duration `name:"account-ttl"`
-	AccountSweepFreq time.Duration `name:"account-sweep-freq"`
-
-	AccountNoteMaxSize int `name:"account-note-max-size"`
-	AccountNoteTTL time.Duration `name:"account-note-ttl"`
-	AccountNoteSweepFreq time.Duration `name:"account-note-sweep-freq"`
-
-	BlockMaxSize int `name:"block-max-size"`
-	BlockTTL time.Duration `name:"block-ttl"`
-	BlockSweepFreq time.Duration `name:"block-sweep-freq"`
-
-	BlockIDsMaxSize int `name:"block-ids-max-size"`
-	BlockIDsTTL time.Duration `name:"block-ids-ttl"`
-	BlockIDsSweepFreq time.Duration `name:"block-ids-sweep-freq"`
-
-	DomainBlockMaxSize int `name:"domain-block-max-size"`
-	DomainBlockTTL time.Duration `name:"domain-block-ttl"`
-	DomainBlockSweepFreq time.Duration `name:"domain-block-sweep-freq"`
-
-	EmojiMaxSize int `name:"emoji-max-size"`
-	EmojiTTL time.Duration `name:"emoji-ttl"`
-	EmojiSweepFreq time.Duration `name:"emoji-sweep-freq"`
-
-	EmojiCategoryMaxSize int `name:"emoji-category-max-size"`
-	EmojiCategoryTTL time.Duration `name:"emoji-category-ttl"`
-	EmojiCategorySweepFreq time.Duration `name:"emoji-category-sweep-freq"`
-
-	FollowMaxSize int `name:"follow-max-size"`
-	FollowTTL time.Duration `name:"follow-ttl"`
-	FollowSweepFreq time.Duration `name:"follow-sweep-freq"`
-
-	FollowIDsMaxSize int `name:"follow-ids-max-size"`
-	FollowIDsTTL time.Duration `name:"follow-ids-ttl"`
-	FollowIDsSweepFreq time.Duration `name:"follow-ids-sweep-freq"`
-
-	FollowRequestMaxSize int `name:"follow-request-max-size"`
-	FollowRequestTTL time.Duration `name:"follow-request-ttl"`
-	FollowRequestSweepFreq time.Duration `name:"follow-request-sweep-freq"`
-
-	FollowRequestIDsMaxSize int `name:"follow-request-ids-max-size"`
-	FollowRequestIDsTTL time.Duration `name:"follow-request-ids-ttl"`
-	FollowRequestIDsSweepFreq time.Duration `name:"follow-request-ids-sweep-freq"`
-
-	InstanceMaxSize int `name:"instance-max-size"`
-	InstanceTTL time.Duration `name:"instance-ttl"`
-	InstanceSweepFreq time.Duration `name:"instance-sweep-freq"`
-
-	ListMaxSize int `name:"list-max-size"`
-	ListTTL time.Duration `name:"list-ttl"`
-	ListSweepFreq time.Duration `name:"list-sweep-freq"`
-
-	ListEntryMaxSize int `name:"list-entry-max-size"`
-	ListEntryTTL time.Duration `name:"list-entry-ttl"`
-	ListEntrySweepFreq time.Duration `name:"list-entry-sweep-freq"`
-
-	MarkerMaxSize int `name:"marker-max-size"`
-	MarkerTTL time.Duration `name:"marker-ttl"`
-	MarkerSweepFreq time.Duration `name:"marker-sweep-freq"`
-
-	MediaMaxSize int `name:"media-max-size"`
-	MediaTTL time.Duration `name:"media-ttl"`
-	MediaSweepFreq time.Duration `name:"media-sweep-freq"`
-
-	MentionMaxSize int `name:"mention-max-size"`
-	MentionTTL time.Duration `name:"mention-ttl"`
-	MentionSweepFreq time.Duration `name:"mention-sweep-freq"`
-
-	NotificationMaxSize int `name:"notification-max-size"`
-	NotificationTTL time.Duration `name:"notification-ttl"`
-	NotificationSweepFreq time.Duration `name:"notification-sweep-freq"`
-
-	ReportMaxSize int `name:"report-max-size"`
-	ReportTTL time.Duration `name:"report-ttl"`
-	ReportSweepFreq time.Duration `name:"report-sweep-freq"`
-
-	StatusMaxSize int `name:"status-max-size"`
-	StatusTTL time.Duration `name:"status-ttl"`
-	StatusSweepFreq time.Duration `name:"status-sweep-freq"`
-
-	StatusFaveMaxSize int `name:"status-fave-max-size"`
-	StatusFaveTTL time.Duration `name:"status-fave-ttl"`
-	StatusFaveSweepFreq time.Duration `name:"status-fave-sweep-freq"`
-
-	TagMaxSize int `name:"tag-max-size"`
-	TagTTL time.Duration `name:"tag-ttl"`
-	TagSweepFreq time.Duration `name:"tag-sweep-freq"`
-
-	TombstoneMaxSize int `name:"tombstone-max-size"`
-	TombstoneTTL time.Duration `name:"tombstone-ttl"`
-	TombstoneSweepFreq time.Duration `name:"tombstone-sweep-freq"`
-
-	UserMaxSize int `name:"user-max-size"`
-	UserTTL time.Duration `name:"user-ttl"`
-	UserSweepFreq time.Duration `name:"user-sweep-freq"`
-
-	WebfingerMaxSize int `name:"webfinger-max-size"`
-	WebfingerTTL time.Duration `name:"webfinger-ttl"`
-	WebfingerSweepFreq time.Duration `name:"webfinger-sweep-freq"`
+	MemoryTarget bytesize.Size `name:"memory-target"`
+	AccountMemRatio float64 `name:"account-mem-ratio"`
+	AccountNoteMemRatio float64 `name:"account-note-mem-ratio"`
+	BlockMemRatio float64 `name:"block-mem-ratio"`
+	BlockIDsMemRatio float64 `name:"block-mem-ratio"`
+	EmojiMemRatio float64 `name:"emoji-mem-ratio"`
+	EmojiCategoryMemRatio float64 `name:"emoji-category-mem-ratio"`
+	FollowMemRatio float64 `name:"follow-mem-ratio"`
+	FollowIDsMemRatio float64 `name:"follow-ids-mem-ratio"`
+	FollowRequestMemRatio float64 `name:"follow-request-mem-ratio"`
+	FollowRequestIDsMemRatio float64 `name:"follow-request-ids-mem-ratio"`
+	InstanceMemRatio float64 `name:"instance-mem-ratio"`
+	ListMemRatio float64 `name:"list-mem-ratio"`
+	ListEntryMemRatio float64 `name:"list-entry-mem-ratio"`
+	MarkerMemRatio float64 `name:"marker-mem-ratio"`
+	MediaMemRatio float64 `name:"media-mem-ratio"`
+	MentionMemRatio float64 `name:"mention-mem-ratio"`
+	NotificationMemRatio float64 `name:"notification-mem-ratio"`
+	ReportMemRatio float64 `name:"report-mem-ratio"`
+	StatusMemRatio float64 `name:"status-mem-ratio"`
+	StatusFaveMemRatio float64 `name:"status-fave-mem-ratio"`
+	TagMemRatio float64 `name:"tag-mem-ratio"`
+	TombstoneMemRatio float64 `name:"tombstone-mem-ratio"`
+	UserMemRatio float64 `name:"user-mem-ratio"`
+	WebfingerMemRatio float64 `name:"webfinger-mem-ratio"`
+	VisibilityMemRatio float64 `name:"visibility-mem-ratio"`
 }
 
 // MarshalMap will marshal current Configuration into a map structure (useful for JSON/TOML/YAML).
@@ -126,111 +126,50 @@ var Defaults = Configuration{
 	AdvancedSenderMultiplier: 2, // 2 senders per CPU
 
 	Cache: CacheConfiguration{
-		GTS: GTSCacheConfiguration{
-			AccountMaxSize: 2000,
-			AccountTTL: time.Minute * 30,
-			AccountSweepFreq: time.Minute,
+		// Rough memory target that the total
+		// size of all State.Caches will attempt
+		// to remain with. Emphasis on *rough*.
+		MemoryTarget: 200 * bytesize.MiB,
 
-			AccountNoteMaxSize: 1000,
-			AccountNoteTTL: time.Minute * 30,
-			AccountNoteSweepFreq: time.Minute,
-
-			BlockMaxSize: 1000,
-			BlockTTL: time.Minute * 30,
-			BlockSweepFreq: time.Minute,
-
-			BlockIDsMaxSize: 500,
-			BlockIDsTTL: time.Minute * 30,
-			BlockIDsSweepFreq: time.Minute,
-
-			DomainBlockMaxSize: 2000,
-			DomainBlockTTL: time.Hour * 24,
-			DomainBlockSweepFreq: time.Minute,
-
-			EmojiMaxSize: 2000,
-			EmojiTTL: time.Minute * 30,
-			EmojiSweepFreq: time.Minute,
-
-			EmojiCategoryMaxSize: 100,
-			EmojiCategoryTTL: time.Minute * 30,
-			EmojiCategorySweepFreq: time.Minute,
-
-			FollowMaxSize: 2000,
-			FollowTTL: time.Minute * 30,
-			FollowSweepFreq: time.Minute,
-
-			FollowIDsMaxSize: 500,
-			FollowIDsTTL: time.Minute * 30,
-			FollowIDsSweepFreq: time.Minute,
-
-			FollowRequestMaxSize: 2000,
-			FollowRequestTTL: time.Minute * 30,
-			FollowRequestSweepFreq: time.Minute,
-
-			FollowRequestIDsMaxSize: 500,
-			FollowRequestIDsTTL: time.Minute * 30,
-			FollowRequestIDsSweepFreq: time.Minute,
-
-			InstanceMaxSize: 2000,
-			InstanceTTL: time.Minute * 30,
-			InstanceSweepFreq: time.Minute,
-
-			ListMaxSize: 2000,
-			ListTTL: time.Minute * 30,
-			ListSweepFreq: time.Minute,
-
-			ListEntryMaxSize: 2000,
-			ListEntryTTL: time.Minute * 30,
-			ListEntrySweepFreq: time.Minute,
-
-			MarkerMaxSize: 2000,
-			MarkerTTL: time.Hour * 6,
-			MarkerSweepFreq: time.Minute,
-
-			MediaMaxSize: 1000,
-			MediaTTL: time.Minute * 30,
-			MediaSweepFreq: time.Minute,
-
-			MentionMaxSize: 2000,
-			MentionTTL: time.Minute * 30,
-			MentionSweepFreq: time.Minute,
-
-			NotificationMaxSize: 1000,
-			NotificationTTL: time.Minute * 30,
-			NotificationSweepFreq: time.Minute,
-
-			ReportMaxSize: 100,
-			ReportTTL: time.Minute * 30,
-			ReportSweepFreq: time.Minute,
-
-			StatusMaxSize: 2000,
-			StatusTTL: time.Minute * 30,
-			StatusSweepFreq: time.Minute,
-
-			StatusFaveMaxSize: 2000,
-			StatusFaveTTL: time.Minute * 30,
-			StatusFaveSweepFreq: time.Minute,
-
-			TagMaxSize: 2000,
-			TagTTL: time.Minute * 30,
-			TagSweepFreq: time.Minute,
-
-			TombstoneMaxSize: 500,
-			TombstoneTTL: time.Minute * 30,
-			TombstoneSweepFreq: time.Minute,
-
-			UserMaxSize: 500,
-			UserTTL: time.Minute * 30,
-			UserSweepFreq: time.Minute,
-
-			WebfingerMaxSize: 250,
-			WebfingerTTL: time.Hour * 24,
-			WebfingerSweepFreq: time.Minute * 15,
-		},
-
-		VisibilityMaxSize: 2000,
-		VisibilityTTL: time.Minute * 30,
-		VisibilitySweepFreq: time.Minute,
+		// These ratios signal what percentage
+		// of the available cache target memory
+		// is allocated to each object type's
+		// cache.
+		//
+		// These are weighted by a totally
+		// assorted mixture of priority, and
+		// manual twiddling to get the generated
+		// cache capacity ratios within normal
+		// amounts dependent size of the models.
+		//
+		// when TODO items in the size.go source
+		// file have been addressed, these should
+		// be able to make some more sense :D
+		AccountMemRatio: 18,
+		AccountNoteMemRatio: 0.1,
+		BlockMemRatio: 3,
+		BlockIDsMemRatio: 3,
+		EmojiMemRatio: 3,
+		EmojiCategoryMemRatio: 0.1,
+		FollowMemRatio: 4,
+		FollowIDsMemRatio: 4,
+		FollowRequestMemRatio: 2,
+		FollowRequestIDsMemRatio: 2,
+		InstanceMemRatio: 1,
+		ListMemRatio: 3,
+		ListEntryMemRatio: 3,
+		MarkerMemRatio: 0.5,
+		MediaMemRatio: 4,
+		MentionMemRatio: 5,
+		NotificationMemRatio: 5,
+		ReportMemRatio: 1,
+		StatusMemRatio: 18,
+		StatusFaveMemRatio: 5,
+		TagMemRatio: 3,
+		TombstoneMemRatio: 2,
+		UserMemRatio: 0.1,
+		WebfingerMemRatio: 0.1,
+		VisibilityMemRatio: 2,
 	},
 
 	HTTPClient: HTTPClientConfiguration{
File diff suppressed because it is too large
@@ -108,7 +108,7 @@ type Config struct {
 // - request logging
 type Client struct {
 	client http.Client
-	badHosts cache.Cache[string, struct{}]
+	badHosts cache.TTLCache[string, struct{}]
 	bodyMax int64
 }
 
@@ -178,7 +178,7 @@ func New(cfg Config) *Client {
 	}
 
 	// Initiate outgoing bad hosts lookup cache.
-	c.badHosts = cache.New[string, struct{}](0, 1000, 0)
+	c.badHosts = cache.NewTTL[string, struct{}](0, 1000, 0)
 	c.badHosts.SetTTL(time.Hour, false)
 	if !c.badHosts.Start(time.Minute) {
 		log.Panic(nil, "failed to start transport controller cache")
@@ -50,7 +50,7 @@ type controller struct {
 	fedDB federatingdb.DB
 	clock pub.Clock
 	client httpclient.SigningClient
-	trspCache cache.Cache[string, *transport]
+	trspCache cache.TTLCache[string, *transport]
 	userAgent string
 	senders int // no. concurrent batch delivery routines.
 }
@@ -76,7 +76,7 @@ func NewController(state *state.State, federatingDB federatingdb.DB, clock pub.C
 		fedDB: federatingDB,
 		clock: clock,
 		client: client,
-		trspCache: cache.New[string, *transport](0, 100, 0),
+		trspCache: cache.NewTTL[string, *transport](0, 100, 0),
 		userAgent: fmt.Sprintf("%s (+%s://%s) gotosocial/%s", applicationName, proto, host, version),
 		senders: senders,
 	}
@@ -29,8 +29,8 @@ import (
 	"codeberg.org/gruf/go-cache/v3"
 )
 
-func newETagCache() cache.Cache[string, eTagCacheEntry] {
-	eTagCache := cache.New[string, eTagCacheEntry](0, 1000, 0)
+func newETagCache() cache.TTLCache[string, eTagCacheEntry] {
+	eTagCache := cache.NewTTL[string, eTagCacheEntry](0, 1000, 0)
 	eTagCache.SetTTL(time.Hour, false)
 	if !eTagCache.Start(time.Minute) {
 		log.Panic(nil, "could not start eTagCache")
@@ -2,7 +2,7 @@
 
 set -eu
 
-EXPECT=$(cat <<"EOF"
+EXPECT=$(cat << "EOF"
 {
   "account-domain": "peepee",
   "accounts-allow-custom-css": true,
@@ -18,86 +18,31 @@ EXPECT=$(cat <<"EOF"
   "application-name": "gts",
   "bind-address": "127.0.0.1",
   "cache": {
-    "gts": {
-      "account-max-size": 99,
-      "account-note-max-size": 1000,
-      "account-note-sweep-freq": 60000000000,
-      "account-note-ttl": 1800000000000,
-      "account-sweep-freq": 1000000000,
-      "account-ttl": 10800000000000,
-      "block-ids-max-size": 500,
-      "block-ids-sweep-freq": 60000000000,
-      "block-ids-ttl": 1800000000000,
-      "block-max-size": 1000,
-      "block-sweep-freq": 60000000000,
-      "block-ttl": 1800000000000,
-      "domain-block-max-size": 2000,
-      "domain-block-sweep-freq": 60000000000,
-      "domain-block-ttl": 86400000000000,
-      "emoji-category-max-size": 100,
-      "emoji-category-sweep-freq": 60000000000,
-      "emoji-category-ttl": 1800000000000,
-      "emoji-max-size": 2000,
-      "emoji-sweep-freq": 60000000000,
-      "emoji-ttl": 1800000000000,
-      "follow-ids-max-size": 500,
-      "follow-ids-sweep-freq": 60000000000,
-      "follow-ids-ttl": 1800000000000,
-      "follow-max-size": 2000,
-      "follow-request-ids-max-size": 500,
-      "follow-request-ids-sweep-freq": 60000000000,
-      "follow-request-ids-ttl": 1800000000000,
-      "follow-request-max-size": 2000,
-      "follow-request-sweep-freq": 60000000000,
-      "follow-request-ttl": 1800000000000,
-      "follow-sweep-freq": 60000000000,
-      "follow-ttl": 1800000000000,
-      "instance-max-size": 2000,
-      "instance-sweep-freq": 60000000000,
-      "instance-ttl": 1800000000000,
-      "list-entry-max-size": 2000,
-      "list-entry-sweep-freq": 60000000000,
-      "list-entry-ttl": 1800000000000,
-      "list-max-size": 2000,
-      "list-sweep-freq": 60000000000,
-      "list-ttl": 1800000000000,
-      "marker-max-size": 2000,
-      "marker-sweep-freq": 60000000000,
-      "marker-ttl": 21600000000000,
-      "media-max-size": 1000,
-      "media-sweep-freq": 60000000000,
-      "media-ttl": 1800000000000,
-      "mention-max-size": 2000,
-      "mention-sweep-freq": 60000000000,
-      "mention-ttl": 1800000000000,
-      "notification-max-size": 1000,
-      "notification-sweep-freq": 60000000000,
-      "notification-ttl": 1800000000000,
-      "report-max-size": 100,
-      "report-sweep-freq": 60000000000,
-      "report-ttl": 1800000000000,
-      "status-fave-max-size": 2000,
-      "status-fave-sweep-freq": 60000000000,
-      "status-fave-ttl": 1800000000000,
-      "status-max-size": 2000,
-      "status-sweep-freq": 60000000000,
-      "status-ttl": 1800000000000,
-      "tag-max-size": 2000,
-      "tag-sweep-freq": 60000000000,
-      "tag-ttl": 1800000000000,
-      "tombstone-max-size": 500,
-      "tombstone-sweep-freq": 60000000000,
-      "tombstone-ttl": 1800000000000,
-      "user-max-size": 500,
-      "user-sweep-freq": 60000000000,
-      "user-ttl": 1800000000000,
-      "webfinger-max-size": 250,
-      "webfinger-sweep-freq": 900000000000,
-      "webfinger-ttl": 86400000000000
-    },
-    "visibility-max-size": 2000,
-    "visibility-sweep-freq": 60000000000,
-    "visibility-ttl": 1800000000000
+    "account-mem-ratio": 18,
+    "account-note-mem-ratio": 0.1,
+    "block-mem-ratio": 3,
+    "emoji-category-mem-ratio": 0.1,
+    "emoji-mem-ratio": 3,
+    "follow-ids-mem-ratio": 4,
+    "follow-mem-ratio": 4,
+    "follow-request-ids-mem-ratio": 2,
+    "follow-request-mem-ratio": 2,
+    "instance-mem-ratio": 1,
+    "list-entry-mem-ratio": 3,
+    "list-mem-ratio": 3,
+    "marker-mem-ratio": 0.5,
+    "media-mem-ratio": 4,
+    "memory-target": 209715200,
+    "mention-mem-ratio": 5,
+    "notification-mem-ratio": 5,
+    "report-mem-ratio": 1,
+    "status-fave-mem-ratio": 5,
+    "status-mem-ratio": 18,
+    "tag-mem-ratio": 3,
+    "tombstone-mem-ratio": 2,
+    "user-mem-ratio": 0.1,
+    "visibility-mem-ratio": 2,
+    "webfinger-mem-ratio": 0.1
   },
   "config-path": "internal/config/testdata/test.yaml",
   "db-address": ":memory:",
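The expected "memory-target": 209715200 above is simply the 200MiB default expressed in bytes. A minimal check of that arithmetic, assuming the go-bytesize import path (the bytesize.Size type and bytesize.MiB constant are used by the defaults in this commit; the exact import path is an assumption here):

package main

import (
    "fmt"

    "codeberg.org/gruf/go-bytesize" // assumed import path for the bytesize package used above.
)

func main() {
    target := 200 * bytesize.MiB
    fmt.Println(uint64(target)) // 200 * 1024 * 1024 = 209715200, matching the envparsing expectation.
}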
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2022 gruf
+Copyright (c) gruf
 
 Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
 
@@ -1,14 +1,14 @@
 # go-cache
 
-Provides access to a simple yet flexible, performant TTL cache via the `Cache{}` interface and `cache.New()`. Under the hood this is returning a `ttl.Cache{}`.
+Provides access to simple, yet flexible, and performant caches (with TTL if required) via the `cache.Cache{}` and `cache.TTLCache{}` interfaces.
 
+## simple
+
+A `cache.Cache{}` implementation with much more of the inner workings exposed. Designed to be used as a base for your own customizations, or used as-is.
+
 ## ttl
 
-A TTL cache implementation with much of the inner workings exposed, designed to be used as a base for your own customizations, or used as-is. Access via the base package `cache.New()` is recommended in the latter case, to prevent accidental use of unsafe methods.
+A `cache.TTLCache{}` implementation with much more of the inner workings exposed. Designed to be used as a base for your own customizations, or used as-is.
 
-## lookup
-
-`lookup.Cache` is an example of a more complex cache implementation using `ttl.Cache{}` as its underpinning. It provides caching of items under multiple keys.
-
 ## result
 
@@ -3,26 +3,33 @@ package cache
 import (
 	"time"
 
-	ttlcache "codeberg.org/gruf/go-cache/v3/ttl"
+	"codeberg.org/gruf/go-cache/v3/simple"
+	"codeberg.org/gruf/go-cache/v3/ttl"
 )
 
-// Cache represents a TTL cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop ttl.Cache{}.
-type Cache[Key comparable, Value any] interface {
+// TTLCache represents a TTL cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop ttl.Cache{}.
+type TTLCache[Key comparable, Value any] interface {
 	// Start will start the cache background eviction routine with given sweep frequency. If already running or a freq <= 0 provided, this is a no-op. This will block until the eviction routine has started.
 	Start(freq time.Duration) bool
 
 	// Stop will stop cache background eviction routine. If not running this is a no-op. This will block until the eviction routine has stopped.
 	Stop() bool
 
+	// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items in the cache, this will simply add the change in TTL to their current expiry time.
+	SetTTL(ttl time.Duration, update bool)
+
+	// implements base cache.
+	Cache[Key, Value]
+}
+
+// Cache represents a cache with customizable callbacks, it exists here to abstract away the "unsafe" methods in the case that you do not want your own implementation atop simple.Cache{}.
+type Cache[Key comparable, Value any] interface {
 	// SetEvictionCallback sets the eviction callback to the provided hook.
 	SetEvictionCallback(hook func(Key, Value))
 
 	// SetInvalidateCallback sets the invalidate callback to the provided hook.
 	SetInvalidateCallback(hook func(Key, Value))
 
-	// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items in the cache, this will simply add the change in TTL to their current expiry time.
-	SetTTL(ttl time.Duration, update bool)
-
 	// Get fetches the value with key from the cache, extending its TTL.
 	Get(key Key) (value Value, ok bool)
 
||||||
Cap() int
|
Cap() int
|
||||||
}
|
}
|
||||||
|
|
||||||
// New returns a new initialized Cache with given initial length, maximum capacity and item TTL.
|
// New returns a new initialized Cache with given initial length, maximum capacity.
|
||||||
func New[K comparable, V any](len, cap int, ttl time.Duration) Cache[K, V] {
|
func New[K comparable, V any](len, cap int) Cache[K, V] {
|
||||||
return ttlcache.New[K, V](len, cap, ttl)
|
return simple.New[K, V](len, cap)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTTL returns a new initialized TTLCache with given initial length, maximum capacity and TTL duration.
|
||||||
|
func NewTTL[K comparable, V any](len, cap int, _ttl time.Duration) TTLCache[K, V] {
|
||||||
|
return ttl.New[K, V](len, cap, _ttl)
|
||||||
}
|
}
|
||||||
|
|
|
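A minimal usage sketch of the two constructors above, based only on the interfaces and call sites visible in this diff (cache.New for a plain capacity-bounded cache, cache.NewTTL plus SetTTL/Start/Stop for the expiring variant, mirroring the badHosts and eTag caches changed elsewhere in this commit). The Set call is assumed alongside the Get shown here; this is an illustration, not code from the commit:

package main

import (
    "fmt"
    "time"

    cache "codeberg.org/gruf/go-cache/v3"
)

func main() {
    // Simple capacity-bounded cache: no TTL, no background eviction routine.
    counts := cache.New[string, int](0, 100)
    counts.Set("hits", 1) // Set assumed to exist on the Cache interface.
    if v, ok := counts.Get("hits"); ok {
        fmt.Println("hits =", v)
    }

    // TTL cache: items expire, swept by a background eviction routine.
    seen := cache.NewTTL[string, struct{}](0, 1000, 0)
    seen.SetTTL(time.Hour, false)
    if !seen.Start(time.Minute) {
        panic("could not start TTL cache")
    }
    defer seen.Stop()
    seen.Set("example.org", struct{}{})
}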
@@ -2,14 +2,38 @@ package result
 
 import (
 	"context"
+	"fmt"
+	"os"
 	"reflect"
-	"time"
 	_ "unsafe"
 
-	"codeberg.org/gruf/go-cache/v3/ttl"
+	"codeberg.org/gruf/go-cache/v3/simple"
 	"codeberg.org/gruf/go-errors/v2"
 )
 
+type result struct {
+	// Result primary key
+	PKey int64
+
+	// keys accessible under
+	Keys cacheKeys
+
+	// cached value
+	Value any
+
+	// cached error
+	Error error
+}
+
+// getResultValue is a safe way of casting and fetching result value.
+func getResultValue[T any](res *result) T {
+	v, ok := res.Value.(T)
+	if !ok {
+		fmt.Fprintf(os.Stderr, "!! BUG: unexpected value type in result: %T\n", res.Value)
+	}
+	return v
+}
+
 // Lookup represents a struct object lookup method in the cache.
 type Lookup struct {
 	// Name is a period ('.') separated string
@@ -23,26 +47,23 @@ type Lookup struct {
 	// Multi allows specifying a key capable of storing
 	// multiple results. Note this only supports invalidate.
 	Multi bool
-
-	// TODO: support toggling case sensitive lookups.
-	// CaseSensitive bool
 }
 
 // Cache provides a means of caching value structures, along with
 // the results of attempting to load them. An example usecase of this
 // cache would be in wrapping a database, allowing caching of sql.ErrNoRows.
-type Cache[Value any] struct {
-	cache ttl.Cache[int64, result[Value]] // underlying result cache
-	invalid func(Value) // store unwrapped invalidate callback.
+type Cache[T any] struct {
+	cache simple.Cache[int64, *result] // underlying result cache
 	lookups structKeys // pre-determined struct lookups
+	invalid func(T) // store unwrapped invalidate callback.
 	ignore func(error) bool // determines cacheable errors
-	copy func(Value) Value // copies a Value type
+	copy func(T) T // copies a Value type
 	next int64 // update key counter
 }
 
 // New returns a new initialized Cache, with given lookups, underlying value copy function and provided capacity.
-func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Value] {
-	var z Value
+func New[T any](lookups []Lookup, copy func(T) T, cap int) *Cache[T] {
+	var z T
 
 	// Determine generic type
 	t := reflect.TypeOf(z)
@@ -58,7 +79,7 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va
 	}
 
 	// Allocate new cache object
-	c := &Cache[Value]{copy: copy}
+	c := &Cache[T]{copy: copy}
 	c.lookups = make([]structKey, len(lookups))
 
 	for i, lookup := range lookups {
@@ -67,38 +88,20 @@ func New[Value any](lookups []Lookup, copy func(Value) Value, cap int) *Cache[Va
 	}
 
 	// Create and initialize underlying cache
-	c.cache.Init(0, cap, 0)
+	c.cache.Init(0, cap)
 	c.SetEvictionCallback(nil)
 	c.SetInvalidateCallback(nil)
 	c.IgnoreErrors(nil)
 	return c
 }
 
-// Start will start the cache background eviction routine with given sweep frequency. If already
-// running or a freq <= 0 provided, this is a no-op. This will block until eviction routine started.
-func (c *Cache[Value]) Start(freq time.Duration) bool {
-	return c.cache.Start(freq)
-}
-
-// Stop will stop cache background eviction routine. If not running this
-// is a no-op. This will block until the eviction routine has stopped.
-func (c *Cache[Value]) Stop() bool {
-	return c.cache.Stop()
-}
-
-// SetTTL sets the cache item TTL. Update can be specified to force updates of existing items
-// in the cache, this will simply add the change in TTL to their current expiry time.
-func (c *Cache[Value]) SetTTL(ttl time.Duration, update bool) {
-	c.cache.SetTTL(ttl, update)
-}
-
 // SetEvictionCallback sets the eviction callback to the provided hook.
-func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
+func (c *Cache[T]) SetEvictionCallback(hook func(T)) {
 	if hook == nil {
 		// Ensure non-nil hook.
-		hook = func(Value) {}
+		hook = func(T) {}
 	}
-	c.cache.SetEvictionCallback(func(pkey int64, res result[Value]) {
+	c.cache.SetEvictionCallback(func(pkey int64, res *result) {
 		c.cache.Lock()
 		for _, key := range res.Keys {
 			// Delete key->pkey lookup
@@ -108,23 +111,25 @@ func (c *Cache[Value]) SetEvictionCallback(hook func(Value)) {
 		c.cache.Unlock()
 
 		if res.Error != nil {
-			// Skip error hooks
+			// Skip value hooks
 			return
 		}
 
-		// Call user hook.
-		hook(res.Value)
+		// Free result and call hook.
+		v := getResultValue[T](res)
+		putResult(res)
+		hook(v)
 	})
 }
 
 // SetInvalidateCallback sets the invalidate callback to the provided hook.
-func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
+func (c *Cache[T]) SetInvalidateCallback(hook func(T)) {
 	if hook == nil {
 		// Ensure non-nil hook.
-		hook = func(Value) {}
+		hook = func(T) {}
 	} // store hook.
 	c.invalid = hook
-	c.cache.SetInvalidateCallback(func(pkey int64, res result[Value]) {
+	c.cache.SetInvalidateCallback(func(pkey int64, res *result) {
 		c.cache.Lock()
 		for _, key := range res.Keys {
 			// Delete key->pkey lookup
@@ -134,17 +139,19 @@ func (c *Cache[Value]) SetInvalidateCallback(hook func(Value)) {
 		c.cache.Unlock()
 
 		if res.Error != nil {
-			// Skip error hooks
+			// Skip value hooks
 			return
 		}
 
-		// Call user hook.
-		hook(res.Value)
+		// Free result and call hook.
+		v := getResultValue[T](res)
+		putResult(res)
+		hook(v)
 	})
 }
 
 // IgnoreErrors allows setting a function hook to determine which error types should / not be cached.
-func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
+func (c *Cache[T]) IgnoreErrors(ignore func(error) bool) {
 	if ignore == nil {
 		ignore = func(err error) bool {
 			return errors.Comparable(
@@ -160,11 +167,10 @@ func (c *Cache[Value]) IgnoreErrors(ignore func(error) bool) {
 	}
 
 // Load will attempt to load an existing result from the cacche for the given lookup and key parts, else calling the provided load function and caching the result.
-func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts ...any) (Value, error) {
+func (c *Cache[T]) Load(lookup string, load func() (T, error), keyParts ...any) (T, error) {
 	var (
-		zero Value
-		res result[Value]
-		ok bool
+		zero T
+		res *result
 	)
 
 	// Get lookup key info by name.
@@ -182,24 +188,22 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 	// Look for primary cache key
 	pkeys := keyInfo.pkeys[ckey]
 
-	if ok = (len(pkeys) > 0); ok {
-		var entry *ttl.Entry[int64, result[Value]]
-
+	if len(pkeys) > 0 {
 		// Fetch the result for primary key
-		entry, ok = c.cache.Cache.Get(pkeys[0])
+		entry, ok := c.cache.Cache.Get(pkeys[0])
 		if ok {
 			// Since the invalidation / eviction hooks acquire a mutex
 			// lock separately, and only at this point are the pkeys
 			// updated, there is a chance that a primary key may return
 			// no matching entry. Hence we have to check for it here.
-			res = entry.Value
+			res = entry.Value.(*result)
 		}
 	}
 
 	// Done with lock
 	c.cache.Unlock()
 
-	if !ok {
+	if res == nil {
 		// Generate fresh result.
 		value, err := load()
 
@@ -209,6 +213,9 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 			return zero, err
 		}
 
+		// Alloc result.
+		res = getResult()
+
 		// Store error result.
 		res.Error = err
 
@@ -219,6 +226,9 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 			key: ckey,
 		}}
 	} else {
+		// Alloc result.
+		res = getResult()
+
 		// Store value result.
 		res.Value = value
 
@@ -251,22 +261,21 @@ func (c *Cache[Value]) Load(lookup string, load func() (Value, error), keyParts
 	}
 
 	// Return a copy of value from cache
-	return c.copy(res.Value), nil
+	return c.copy(getResultValue[T](res)), nil
 }
 
 // Store will call the given store function, and on success store the value in the cache as a positive result.
-func (c *Cache[Value]) Store(value Value, store func() error) error {
+func (c *Cache[T]) Store(value T, store func() error) error {
 	// Attempt to store this value.
 	if err := store(); err != nil {
 		return err
 	}
 
 	// Prepare cached result.
-	result := result[Value]{
-		Keys: c.lookups.generate(value),
-		Value: c.copy(value),
-		Error: nil,
-	}
+	result := getResult()
+	result.Keys = c.lookups.generate(value)
+	result.Value = c.copy(value)
+	result.Error = nil
 
 	var evict func()
 
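A hedged usage sketch of the result cache API shown above (New with lookups, a copy function and a capacity, then Load and Invalidate), wrapping a stand-in lookup. The User model, the fake "database", and the lookup names are illustrative assumptions, not code from this commit:

package main

import (
    "errors"
    "fmt"

    "codeberg.org/gruf/go-cache/v3/result"
)

// User is an illustrative model, not one from this commit.
type User struct {
    ID       string
    Username string
}

var errNotFound = errors.New("not found")

// fakeDB stands in for a real database lookup.
var fakeDB = map[string]*User{"01AAA": {ID: "01AAA", Username: "kim"}}

var userCache = result.New([]result.Lookup{
    {Name: "ID"},
    {Name: "Username"},
}, func(u *User) *User {
    u2 := new(User)
    *u2 = *u
    return u2
}, 100)

func getUserByID(id string) (*User, error) {
    // Load returns a cached copy, or calls the loader and caches the result
    // (including errors the cache is configured not to ignore).
    return userCache.Load("ID", func() (*User, error) {
        u, ok := fakeDB[id]
        if !ok {
            return nil, errNotFound
        }
        return u, nil
    }, id)
}

func main() {
    u, err := getUserByID("01AAA")
    fmt.Println(u, err)

    // Drop the cached entry again after a write, via the Invalidate shown above.
    userCache.Invalidate("ID", "01AAA")
}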
@@ -293,9 +302,8 @@ func (c *Cache[Value]) Store(value Value, store func() error) error {
 }
 
 // Has checks the cache for a positive result under the given lookup and key parts.
-func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
-	var res result[Value]
-	var ok bool
+func (c *Cache[T]) Has(lookup string, keyParts ...any) bool {
+	var res *result
 
 	// Get lookup key info by name.
 	keyInfo := c.lookups.get(lookup)
@@ -312,29 +320,27 @@ func (c *Cache[Value]) Has(lookup string, keyParts ...any) bool {
 	// Look for primary key for cache key
 	pkeys := keyInfo.pkeys[ckey]
 
-	if ok = (len(pkeys) > 0); ok {
-		var entry *ttl.Entry[int64, result[Value]]
-
+	if len(pkeys) > 0 {
 		// Fetch the result for primary key
-		entry, ok = c.cache.Cache.Get(pkeys[0])
+		entry, ok := c.cache.Cache.Get(pkeys[0])
 		if ok {
 			// Since the invalidation / eviction hooks acquire a mutex
 			// lock separately, and only at this point are the pkeys
 			// updated, there is a chance that a primary key may return
 			// no matching entry. Hence we have to check for it here.
-			res = entry.Value
+			res = entry.Value.(*result)
 		}
 	}
 
 	// Done with lock
 	c.cache.Unlock()
 
-	// Check for non-error result.
-	return ok && (res.Error == nil)
+	// Check for result AND non-error result.
+	return (res != nil && res.Error == nil)
 }
 
 // Invalidate will invalidate any result from the cache found under given lookup and key parts.
-func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
+func (c *Cache[T]) Invalidate(lookup string, keyParts ...any) {
 	// Get lookup key info by name.
 	keyInfo := c.lookups.get(lookup)
 
@@ -351,15 +357,20 @@ func (c *Cache[Value]) Invalidate(lookup string, keyParts ...any) {
 	c.cache.InvalidateAll(pkeys...)
 }
 
-// Clear empties the cache, calling the invalidate callback.
-func (c *Cache[Value]) Clear() { c.cache.Clear() }
+// Clear empties the cache, calling the invalidate callback where necessary.
+func (c *Cache[T]) Clear() { c.Trim(100) }
+
+// Trim ensures the cache stays within percentage of total capacity, truncating where necessary.
+func (c *Cache[T]) Trim(perc float64) { c.cache.Trim(perc) }
 
 // store will cache this result under all of its required cache keys.
-func (c *Cache[Value]) store(res result[Value]) (evict func()) {
+func (c *Cache[T]) store(res *result) (evict func()) {
+	var toEvict []*result
+
 	// Get primary key
-	pnext := c.next
+	res.PKey = c.next
 	c.next++
-	if pnext > c.next {
+	if res.PKey > c.next {
 		panic("cache primary key overflow")
 	}
 
@ -371,15 +382,19 @@ func (c *Cache[Value]) store(res result[Value]) (evict func()) {
|
||||||
for _, conflict := range pkeys {
|
for _, conflict := range pkeys {
|
||||||
// Get the overlapping result with this key.
|
// Get the overlapping result with this key.
|
||||||
entry, _ := c.cache.Cache.Get(conflict)
|
entry, _ := c.cache.Cache.Get(conflict)
|
||||||
|
confRes := entry.Value.(*result)
|
||||||
|
|
||||||
// From conflicting entry, drop this key, this
|
// From conflicting entry, drop this key, this
|
||||||
// will prevent eviction cleanup key confusion.
|
// will prevent eviction cleanup key confusion.
|
||||||
entry.Value.Keys.drop(key.info.name)
|
confRes.Keys.drop(key.info.name)
|
||||||
|
|
||||||
if len(entry.Value.Keys) == 0 {
|
if len(res.Keys) == 0 {
|
||||||
// We just over-wrote the only lookup key for
|
// We just over-wrote the only lookup key for
|
||||||
// this value, so we drop its primary key too.
|
// this value, so we drop its primary key too.
|
||||||
c.cache.Cache.Delete(conflict)
|
c.cache.Cache.Delete(conflict)
|
||||||
|
|
||||||
|
// Add finished result to evict queue.
|
||||||
|
toEvict = append(toEvict, confRes)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -388,42 +403,27 @@ func (c *Cache[Value]) store(res result[Value]) (evict func()) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Store primary key lookup.
|
// Store primary key lookup.
|
||||||
pkeys = append(pkeys, pnext)
|
pkeys = append(pkeys, res.PKey)
|
||||||
key.info.pkeys[key.key] = pkeys
|
key.info.pkeys[key.key] = pkeys
|
||||||
}
|
}
|
||||||
|
|
||||||
// Store main entry under primary key, using evict hook if needed
|
// Store main entry under primary key, catch evicted.
|
||||||
c.cache.Cache.SetWithHook(pnext, &ttl.Entry[int64, result[Value]]{
|
c.cache.Cache.SetWithHook(res.PKey, &simple.Entry{
|
||||||
Expiry: c.expiry(),
|
Key: res.PKey,
|
||||||
Key: pnext,
|
|
||||||
Value: res,
|
Value: res,
|
||||||
}, func(_ int64, item *ttl.Entry[int64, result[Value]]) {
|
}, func(_ int64, item *simple.Entry) {
|
||||||
evict = func() { c.cache.Evict(item.Key, item.Value) }
|
toEvict = append(toEvict, item.Value.(*result))
|
||||||
})
|
})
|
||||||
|
|
||||||
return evict
|
if len(toEvict) == 0 {
|
||||||
}
|
// none evicted.
|
||||||
|
return nil
|
||||||
//go:linkname runtime_nanotime runtime.nanotime
|
}
|
||||||
func runtime_nanotime() uint64
|
|
||||||
|
return func() {
|
||||||
// expiry returns an the next expiry time to use for an entry,
|
for _, res := range toEvict {
|
||||||
// which is equivalent to time.Now().Add(ttl), or zero if disabled.
|
// Call evict hook on each entry.
|
||||||
func (c *Cache[Value]) expiry() uint64 {
|
c.cache.Evict(res.PKey, res)
|
||||||
if ttl := c.cache.TTL; ttl > 0 {
|
}
|
||||||
return runtime_nanotime() +
|
|
||||||
uint64(c.cache.TTL)
|
|
||||||
}
|
}
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type result[Value any] struct {
|
|
||||||
// keys accessible under
|
|
||||||
Keys cacheKeys
|
|
||||||
|
|
||||||
// cached value
|
|
||||||
Value Value
|
|
||||||
|
|
||||||
// cached error
|
|
||||||
Error error
|
|
||||||
}
|
}
|
||||||
|
|
|
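The reworked store() above queues any displaced entries in toEvict while the cache mutex is held, and returns a closure so the eviction hooks only run once the lock is released. A minimal, self-contained sketch of that pattern (the store/item types and hook below are hypothetical, not the library's API):

```go
package main

import (
	"fmt"
	"sync"
)

// item stands in for the cached *result values above (hypothetical).
type item struct{ key int64 }

type store struct {
	mu    sync.Mutex
	items map[int64]*item
	evict func(*item) // eviction hook, intended to run outside the lock
}

// replace swaps in a new item and returns a closure that fires the
// eviction hook for anything displaced, once the mutex is released.
func (s *store) replace(it *item) func() {
	s.mu.Lock()
	defer s.mu.Unlock()

	var evicted []*item
	if old, ok := s.items[it.key]; ok {
		evicted = append(evicted, old)
	}
	s.items[it.key] = it

	if len(evicted) == 0 {
		return nil // nothing displaced
	}
	return func() {
		for _, old := range evicted {
			s.evict(old)
		}
	}
}

func main() {
	s := &store{
		items: map[int64]*item{1: {key: 1}},
		evict: func(it *item) { fmt.Println("evicted", it.key) },
	}
	if cb := s.replace(&item{key: 1}); cb != nil {
		cb() // hook fires here, after the mutex is no longer held
	}
}
```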
@@ -145,7 +145,7 @@ type structField struct {
 }

 // genKey generates a cache key string for given key parts (i.e. serializes them using "go-mangler").
-func (sk structKey) genKey(parts []any) string {
+func (sk *structKey) genKey(parts []any) string {
     // Check this expected no. key parts.
     if len(parts) != len(sk.fields) {
         panic(fmt.Sprintf("incorrect no. key parts provided: want=%d received=%d", len(parts), len(sk.fields)))
@@ -246,10 +246,12 @@ var bufPool = sync.Pool{
     },
 }

+// getBuf ...
 func getBuf() *byteutil.Buffer {
     return bufPool.Get().(*byteutil.Buffer)
 }

+// putBuf ...
 func putBuf(buf *byteutil.Buffer) {
     if buf.Cap() > int(^uint16(0)) {
         return // drop large bufs
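The guard in putBuf compares against int(^uint16(0)), i.e. 65535: buffers that have grown past roughly 64KiB are simply dropped rather than returned, so the pool never pins oversized allocations. A small sketch of the same guard using bytes.Buffer for illustration (the real code pools byteutil.Buffer):

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufPool mirrors the pattern above; bytes.Buffer is used here only for illustration.
var bufPool = sync.Pool{New: func() any { return new(bytes.Buffer) }}

func putBuf(buf *bytes.Buffer) {
	if buf.Cap() > int(^uint16(0)) { // 65535 bytes, just under 64KiB
		return // let the GC reclaim oversized buffers instead of pooling them
	}
	buf.Reset()
	bufPool.Put(buf)
}

func main() {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.WriteString("hello")
	putBuf(buf) // small buffer: returned to the pool for reuse
	fmt.Println("pool cap threshold:", int(^uint16(0)))
}
```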
@@ -0,0 +1,24 @@
package result

import "sync"

// resultPool is a global pool for result
// objects, regardless of cache type.
var resultPool sync.Pool

// getEntry fetches a result from pool, or allocates new.
func getResult() *result {
    v := resultPool.Get()
    if v == nil {
        return new(result)
    }
    return v.(*result)
}

// putResult replaces a result in the pool.
func putResult(r *result) {
    r.Keys = nil
    r.Value = nil
    r.Error = nil
    resultPool.Put(r)
}
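putResult zeroes every field before handing the object back, so the pool itself never keeps cached values or errors reachable. A standalone sketch of that round trip (the result type here is a stand-in; the real one lives unexported inside the result package):

```go
package main

import (
	"fmt"
	"sync"
)

// result mirrors the pooled type above for illustration only.
type result struct {
	Keys  []string
	Value any
	Error error
}

var resultPool sync.Pool

func getResult() *result {
	if v := resultPool.Get(); v != nil {
		return v.(*result)
	}
	return new(result)
}

// putResult clears all fields so the pool does not pin Value/Error references.
func putResult(r *result) {
	r.Keys = nil
	r.Value = nil
	r.Error = nil
	resultPool.Put(r)
}

func main() {
	r := getResult()
	r.Value = "cached thing"
	putResult(r)
	fmt.Println(getResult().Value) // <nil>: nothing survives the round trip
}
```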
@@ -0,0 +1,454 @@
package simple

import (
    "sync"

    "codeberg.org/gruf/go-maps"
)

// Entry represents an item in the cache.
type Entry struct {
    Key   any
    Value any
}

// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own.
type Cache[Key comparable, Value any] struct {
    // Evict is the hook that is called when an item is evicted from the cache.
    Evict func(Key, Value)

    // Invalid is the hook that is called when an item's data in the cache is invalidated, includes Add/Set.
    Invalid func(Key, Value)

    // Cache is the underlying hashmap used for this cache.
    Cache maps.LRUMap[Key, *Entry]

    // Embedded mutex.
    sync.Mutex
}

// New returns a new initialized Cache with given initial length, maximum capacity and item TTL.
func New[K comparable, V any](len, cap int) *Cache[K, V] {
    c := new(Cache[K, V])
    c.Init(len, cap)
    return c
}

// Init will initialize this cache with given initial length, maximum capacity and item TTL.
func (c *Cache[K, V]) Init(len, cap int) {
    c.SetEvictionCallback(nil)
    c.SetInvalidateCallback(nil)
    c.Cache.Init(len, cap)
}

// SetEvictionCallback: implements cache.Cache's SetEvictionCallback().
func (c *Cache[K, V]) SetEvictionCallback(hook func(K, V)) {
    c.locked(func() { c.Evict = hook })
}

// SetInvalidateCallback: implements cache.Cache's SetInvalidateCallback().
func (c *Cache[K, V]) SetInvalidateCallback(hook func(K, V)) {
    c.locked(func() { c.Invalid = hook })
}

// Get: implements cache.Cache's Get().
func (c *Cache[K, V]) Get(key K) (V, bool) {
    var (
        // did exist in cache?
        ok bool

        // cached value.
        v V
    )

    c.locked(func() {
        var item *Entry

        // Check for item in cache
        item, ok = c.Cache.Get(key)
        if !ok {
            return
        }

        // Set item value.
        v = item.Value.(V)
    })

    return v, ok
}

// Add: implements cache.Cache's Add().
func (c *Cache[K, V]) Add(key K, value V) bool {
    var (
        // did exist in cache?
        ok bool

        // was entry evicted?
        ev bool

        // evicted key values.
        evcK K
        evcV V

        // hook func ptrs.
        evict func(K, V)
    )

    c.locked(func() {
        // Check if in cache.
        ok = c.Cache.Has(key)
        if ok {
            return
        }

        // Alloc new entry.
        new := getEntry()
        new.Key = key
        new.Value = value

        // Add new entry to cache and catched any evicted item.
        c.Cache.SetWithHook(key, new, func(_ K, item *Entry) {
            evcK = item.Key.(K)
            evcV = item.Value.(V)
            ev = true
            putEntry(item)
        })

        // Set hook func ptr.
        evict = c.Evict
    })

    if ev && evict != nil {
        // Pass to eviction hook.
        evict(evcK, evcV)
    }

    return !ok
}

// Set: implements cache.Cache's Set().
func (c *Cache[K, V]) Set(key K, value V) {
    var (
        // did exist in cache?
        ok bool

        // was entry evicted?
        ev bool

        // old value.
        oldV V

        // evicted key values.
        evcK K
        evcV V

        // hook func ptrs.
        invalid func(K, V)
        evict   func(K, V)
    )

    c.locked(func() {
        var item *Entry

        // Check for item in cache
        item, ok = c.Cache.Get(key)

        if ok {
            // Set old value.
            oldV = item.Value.(V)

            // Update the existing item.
            item.Value = value
        } else {
            // Alloc new entry.
            new := getEntry()
            new.Key = key
            new.Value = value

            // Add new entry to cache and catched any evicted item.
            c.Cache.SetWithHook(key, new, func(_ K, item *Entry) {
                evcK = item.Key.(K)
                evcV = item.Value.(V)
                ev = true
                putEntry(item)
            })
        }

        // Set hook func ptrs.
        invalid = c.Invalid
        evict = c.Evict
    })

    if ok && invalid != nil {
        // Pass to invalidate hook.
        invalid(key, oldV)
    }

    if ev && evict != nil {
        // Pass to eviction hook.
        evict(evcK, evcV)
    }
}

// CAS: implements cache.Cache's CAS().
func (c *Cache[K, V]) CAS(key K, old V, new V, cmp func(V, V) bool) bool {
    var (
        // did exist in cache?
        ok bool

        // swapped value.
        oldV V

        // hook func ptrs.
        invalid func(K, V)
    )

    c.locked(func() {
        var item *Entry

        // Check for item in cache
        item, ok = c.Cache.Get(key)
        if !ok {
            return
        }

        // Set old value.
        oldV = item.Value.(V)

        // Perform the comparison
        if !cmp(old, oldV) {
            var zero V
            oldV = zero
            return
        }

        // Update value.
        item.Value = new

        // Set hook func ptr.
        invalid = c.Invalid
    })

    if ok && invalid != nil {
        // Pass to invalidate hook.
        invalid(key, oldV)
    }

    return ok
}

// Swap: implements cache.Cache's Swap().
func (c *Cache[K, V]) Swap(key K, swp V) V {
    var (
        // did exist in cache?
        ok bool

        // swapped value.
        oldV V

        // hook func ptrs.
        invalid func(K, V)
    )

    c.locked(func() {
        var item *Entry

        // Check for item in cache
        item, ok = c.Cache.Get(key)
        if !ok {
            return
        }

        // Set old value.
        oldV = item.Value.(V)

        // Update value.
        item.Value = swp

        // Set hook func ptr.
        invalid = c.Invalid
    })

    if ok && invalid != nil {
        // Pass to invalidate hook.
        invalid(key, oldV)
    }

    return oldV
}

// Has: implements cache.Cache's Has().
func (c *Cache[K, V]) Has(key K) (ok bool) {
    c.locked(func() {
        ok = c.Cache.Has(key)
    })
    return
}

// Invalidate: implements cache.Cache's Invalidate().
func (c *Cache[K, V]) Invalidate(key K) (ok bool) {
    var (
        // old value.
        oldV V

        // hook func ptrs.
        invalid func(K, V)
    )

    c.locked(func() {
        var item *Entry

        // Check for item in cache
        item, ok = c.Cache.Get(key)
        if !ok {
            return
        }

        // Set old value.
        oldV = item.Value.(V)

        // Remove from cache map
        _ = c.Cache.Delete(key)

        // Free entry
        putEntry(item)

        // Set hook func ptrs.
        invalid = c.Invalid
    })

    if ok && invalid != nil {
        // Pass to invalidate hook.
        invalid(key, oldV)
    }

    return
}

// InvalidateAll: implements cache.Cache's InvalidateAll().
func (c *Cache[K, V]) InvalidateAll(keys ...K) (ok bool) {
    var (
        // deleted items.
        items []*Entry

        // hook func ptrs.
        invalid func(K, V)
    )

    // Allocate a slice for invalidated.
    items = make([]*Entry, 0, len(keys))

    c.locked(func() {
        for x := range keys {
            var item *Entry

            // Check for item in cache
            item, ok = c.Cache.Get(keys[x])
            if !ok {
                continue
            }

            // Append this old value.
            items = append(items, item)

            // Remove from cache map
            _ = c.Cache.Delete(keys[x])
        }

        // Set hook func ptrs.
        invalid = c.Invalid
    })

    if invalid != nil {
        for x := range items {
            // Pass to invalidate hook.
            k := items[x].Key.(K)
            v := items[x].Value.(V)
            invalid(k, v)

            // Free this entry.
            putEntry(items[x])
        }
    }

    return
}

// Clear: implements cache.Cache's Clear().
func (c *Cache[K, V]) Clear() { c.Trim(100) }

// Trim will truncate the cache to ensure it stays within given percentage of total capacity.
func (c *Cache[K, V]) Trim(perc float64) {
    var (
        // deleted items
        items []*Entry

        // hook func ptrs.
        invalid func(K, V)
    )

    c.locked(func() {
        // Calculate number of cache items to truncate.
        max := (perc / 100) * float64(c.Cache.Cap())
        diff := c.Cache.Len() - int(max)
        if diff <= 0 {
            return
        }

        // Set hook func ptr.
        invalid = c.Invalid

        // Truncate by calculated length.
        items = c.truncate(diff, invalid)
    })

    if invalid != nil {
        for x := range items {
            // Pass to invalidate hook.
            k := items[x].Key.(K)
            v := items[x].Value.(V)
            invalid(k, v)

            // Free this entry.
            putEntry(items[x])
        }
    }
}

// Len: implements cache.Cache's Len().
func (c *Cache[K, V]) Len() (l int) {
    c.locked(func() { l = c.Cache.Len() })
    return
}

// Cap: implements cache.Cache's Cap().
func (c *Cache[K, V]) Cap() (l int) {
    c.locked(func() { l = c.Cache.Cap() })
    return
}

// locked performs given function within mutex lock (NOTE: UNLOCK IS NOT DEFERRED).
func (c *Cache[K, V]) locked(fn func()) {
    c.Lock()
    fn()
    c.Unlock()
}

// truncate will truncate the cache by given size, returning deleted items.
func (c *Cache[K, V]) truncate(sz int, hook func(K, V)) []*Entry {
    if hook == nil {
        // No hook to execute, simply release all truncated entries.
        c.Cache.Truncate(sz, func(_ K, item *Entry) { putEntry(item) })
        return nil
    }

    // Allocate a slice for deleted.
    deleted := make([]*Entry, 0, sz)

    // Truncate and catch all deleted k-v pairs.
    c.Cache.Truncate(sz, func(_ K, item *Entry) {
        deleted = append(deleted, item)
    })

    return deleted
}
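A rough usage sketch of this new simple (non-TTL) cache, built only from the exported functions shown above; per the commit message, GoToSocial's scheduled sweep job keeps each cache below 80% utilisation in much the same way as the Trim(80) call here:

```go
package main

import (
	"fmt"

	"codeberg.org/gruf/go-cache/v3/simple"
)

func main() {
	// A small cache: initial length 0, maximum capacity 100 entries.
	c := simple.New[string, int](0, 100)

	// Optional hook, fired when entries are pushed out of the LRU map.
	c.SetEvictionCallback(func(k string, v int) {
		fmt.Println("evicted:", k, v)
	})

	c.Set("answer", 42)
	if v, ok := c.Get("answer"); ok {
		fmt.Println("got:", v)
	}

	// Truncate back to at most 80% of capacity (no TTL sweeps any more).
	c.Trim(80)

	fmt.Println("len:", c.Len(), "cap:", c.Cap())
}
```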
@@ -0,0 +1,23 @@
package simple

import "sync"

// entryPool is a global pool for Entry
// objects, regardless of cache type.
var entryPool sync.Pool

// getEntry fetches an Entry from pool, or allocates new.
func getEntry() *Entry {
    v := entryPool.Get()
    if v == nil {
        return new(Entry)
    }
    return v.(*Entry)
}

// putEntry replaces an Entry in the pool.
func putEntry(e *Entry) {
    e.Key = nil
    e.Value = nil
    entryPool.Put(e)
}
@@ -15,7 +15,7 @@ type Entry[Key comparable, Value any] struct {
     Expiry uint64
 }

-// Cache is the underlying Cache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own.
+// Cache is the underlying TTLCache implementation, providing both the base Cache interface and unsafe access to underlying map to allow flexibility in building your own.
 type Cache[Key comparable, Value any] struct {
     // TTL is the cache item TTL.
     TTL time.Duration
@@ -0,0 +1,19 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/

example
.idea
go.sum
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2020 Dmitriy Titov (Дмитрий Титов)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,48 @@
# size - calculates variable's memory consumption at runtime

### Part of the [Transflow Project](http://transflow.ru/)

Sometimes you may need a tool to measure the size of object in your Go program at runtime. This package makes an attempt to do so. Package based on `binary.Size()` from Go standard library.

Features:
- supports non-fixed size variables and struct fields: `struct`, `int`, `slice`, `string`, `map`;
- supports complex types including structs with non-fixed size fields;
- supports all basic types (numbers, bool);
- supports `chan` and `interface`;
- supports pointers;
- implements infinite recursion detection (i.e. pointer inside struct field references to parent struct).

### Usage example

```
package main

import (
	"fmt"

	// Use latest tag.
	"github.com/DmitriyVTitov/size"
)

func main() {
	a := struct {
		a int
		b string
		c bool
		d int32
		e []byte
		f [3]int64
	}{
		a: 10,                    // 8 bytes
		b: "Text",                // 16 (string itself) + 4 = 20 bytes
		c: true,                  // 1 byte
		d: 25,                    // 4 bytes
		e: []byte{'c', 'd', 'e'}, // 24 (slice itself) + 3 = 27 bytes
		f: [3]int64{1, 2, 3},     // 3 * 8 = 24 bytes
	} // 84 + 3 (padding) = 87 bytes

	fmt.Println(size.Of(a))
}

// Output: 87
```
@@ -0,0 +1,142 @@
// Package size implements run-time calculation of size of the variable.
// Source code is based on "binary.Size()" function from Go standard library.
// size.Of() omits size of slices, arrays and maps containers itself (24, 24 and 8 bytes).
// When counting maps separate calculations are done for keys and values.
package size

import (
    "reflect"
    "unsafe"
)

// Of returns the size of 'v' in bytes.
// If there is an error during calculation, Of returns -1.
func Of(v interface{}) int {
    // Cache with every visited pointer so we don't count two pointers
    // to the same memory twice.
    cache := make(map[uintptr]bool)
    return sizeOf(reflect.Indirect(reflect.ValueOf(v)), cache)
}

// sizeOf returns the number of bytes the actual data represented by v occupies in memory.
// If there is an error, sizeOf returns -1.
func sizeOf(v reflect.Value, cache map[uintptr]bool) int {
    switch v.Kind() {

    case reflect.Array:
        sum := 0
        for i := 0; i < v.Len(); i++ {
            s := sizeOf(v.Index(i), cache)
            if s < 0 {
                return -1
            }
            sum += s
        }

        return sum + (v.Cap()-v.Len())*int(v.Type().Elem().Size())

    case reflect.Slice:
        // return 0 if this node has been visited already
        if cache[v.Pointer()] {
            return 0
        }
        cache[v.Pointer()] = true

        sum := 0
        for i := 0; i < v.Len(); i++ {
            s := sizeOf(v.Index(i), cache)
            if s < 0 {
                return -1
            }
            sum += s
        }

        sum += (v.Cap() - v.Len()) * int(v.Type().Elem().Size())

        return sum + int(v.Type().Size())

    case reflect.Struct:
        sum := 0
        for i, n := 0, v.NumField(); i < n; i++ {
            s := sizeOf(v.Field(i), cache)
            if s < 0 {
                return -1
            }
            sum += s
        }

        // Look for struct padding.
        padding := int(v.Type().Size())
        for i, n := 0, v.NumField(); i < n; i++ {
            padding -= int(v.Field(i).Type().Size())
        }

        return sum + padding

    case reflect.String:
        s := v.String()
        hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
        if cache[hdr.Data] {
            return int(v.Type().Size())
        }
        cache[hdr.Data] = true
        return len(s) + int(v.Type().Size())

    case reflect.Ptr:
        // return Ptr size if this node has been visited already (infinite recursion)
        if cache[v.Pointer()] {
            return int(v.Type().Size())
        }
        cache[v.Pointer()] = true
        if v.IsNil() {
            return int(reflect.New(v.Type()).Type().Size())
        }
        s := sizeOf(reflect.Indirect(v), cache)
        if s < 0 {
            return -1
        }
        return s + int(v.Type().Size())

    case reflect.Bool,
        reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
        reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
        reflect.Int, reflect.Uint,
        reflect.Chan,
        reflect.Uintptr,
        reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128,
        reflect.Func:
        return int(v.Type().Size())

    case reflect.Map:
        // return 0 if this node has been visited already (infinite recursion)
        if cache[v.Pointer()] {
            return 0
        }
        cache[v.Pointer()] = true
        sum := 0
        keys := v.MapKeys()
        for i := range keys {
            val := v.MapIndex(keys[i])
            // calculate size of key and value separately
            sv := sizeOf(val, cache)
            if sv < 0 {
                return -1
            }
            sum += sv
            sk := sizeOf(keys[i], cache)
            if sk < 0 {
                return -1
            }
            sum += sk
        }
        // Include overhead due to unused map buckets. 10.79 comes
        // from https://golang.org/src/runtime/map.go.
        return sum + int(v.Type().Size()) + int(float64(len(keys))*10.79)

    case reflect.Interface:
        return sizeOf(v.Elem(), cache) + int(v.Type().Size())

    }

    return -1
}
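This vendored size.Of is what the commit uses to estimate per-entry memory for its model caches, so cache capacities can be derived as ratios of one configured memory target. A hedged sketch of that idea (the struct and 64MiB target below are illustrative stand-ins, not GoToSocial's actual models or ratios):

```go
package main

import (
	"fmt"

	"github.com/DmitriyVTitov/size"
)

// exampleStatus is a stand-in for a cached model type.
type exampleStatus struct {
	ID      string
	Content string
	Tags    []string
}

func main() {
	// Estimate the in-memory footprint of one representative entry.
	est := size.Of(exampleStatus{
		ID:      "01H000000000000000000000",
		Content: "hello world",
		Tags:    []string{"go", "cache"},
	})

	memoryTarget := 64 * 1024 * 1024 // e.g. 64MiB earmarked for this cache
	fmt.Println("bytes per entry (estimate):", est)
	fmt.Println("max entries at this target:", memoryTarget/est)
}
```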
@@ -13,10 +13,11 @@ codeberg.org/gruf/go-bytesize
 # codeberg.org/gruf/go-byteutil v1.1.2
 ## explicit; go 1.16
 codeberg.org/gruf/go-byteutil
-# codeberg.org/gruf/go-cache/v3 v3.4.4
+# codeberg.org/gruf/go-cache/v3 v3.5.3
 ## explicit; go 1.19
 codeberg.org/gruf/go-cache/v3
 codeberg.org/gruf/go-cache/v3/result
+codeberg.org/gruf/go-cache/v3/simple
 codeberg.org/gruf/go-cache/v3/ttl
 # codeberg.org/gruf/go-debug v1.3.0
 ## explicit; go 1.16
@@ -69,6 +70,9 @@ codeberg.org/gruf/go-sched
 codeberg.org/gruf/go-store/v2/kv
 codeberg.org/gruf/go-store/v2/storage
 codeberg.org/gruf/go-store/v2/util
+# github.com/DmitriyVTitov/size v1.5.0
+## explicit; go 1.14
+github.com/DmitriyVTitov/size
 # github.com/KimMachineGun/automemlimit v0.2.6
 ## explicit; go 1.19
 github.com/KimMachineGun/automemlimit