add pagination cache

This commit is contained in:
xing 2023-12-08 21:33:09 +08:00
parent 74304b5b12
commit 227de8bdc8
6 changed files with 181 additions and 4 deletions

View File

@ -76,6 +76,7 @@ func SetupRouter() *gin.Engine {
r.GET("/p/author/:author/page/:page", actions.ThemeHook(constraints.Author))
r.POST("/login", actions.Login)
r.GET("/p/:id", actions.ThemeHook(constraints.Detail))
r.GET("/p/:id/comment-page-:page", actions.ThemeHook(constraints.Detail))
r.GET("/p/:id/feed", actions.PostFeed)
r.GET("/feed", actions.Feed)
r.GET("/comments/feed", actions.CommentsFeed)

View File

@ -11,8 +11,6 @@ import (
"time"
)
var ctx = context.Background()
var mapFlush = safety.NewMap[string, func(any)]()
var anyFlush = safety.NewMap[string, func()]()
@ -60,6 +58,7 @@ var clears []clearExpired
var flushes []flush
func Flush() {
ctx := context.WithValue(context.Background(), "execFlushBy", "mangerFlushFn")
for _, f := range flushes {
f.Flush(ctx)
}
@ -88,11 +87,13 @@ func PushMangerMap[K comparable, V any](name string, m *cache.MapCache[K, V]) {
}
mapCache.Store(name, m)
anyFlush.Store(name, func() {
ctx := context.WithValue(context.Background(), "ctx", "registerFlush")
m.Flush(ctx)
})
mapFlush.Store(name, func(a any) {
k, ok := a.([]K)
if ok && len(k) > 0 {
ctx := context.WithValue(context.Background(), "ctx", "registerFlush")
m.Del(ctx, k...)
}
})
@ -168,6 +169,25 @@ func parseArgs(args ...any) (string, func() time.Duration) {
return name, fn
}
// NewPaginationCache wires a MapCache up as a cache.Pagination registered under name.
// maxNum is the default threshold at which a key switches from locally sliced pages
// to per-page DB queries, and fetchNum the default batch size used to prime the
// local cache. Callers may override either by passing up to two func() int values
// in a (first overrides maxNum, second overrides fetchNum); otherwise runtime-
// reloadable values seeded with the defaults are used.
func NewPaginationCache[K comparable, V any](m *cache.MapCache[string, helper.PaginationData[V]], maxNum int,
	dbFn cache.DbFn[K, V], localFn cache.LocalFn[K, V], dbKeyFn func(K, ...any) string, fetchNum int, name string, a ...any) *cache.Pagination[K, V] {
	overrides := helper.ParseArgs([]func() int(nil), a...)
	var maxFn, fetchFn func() int
	switch {
	case len(overrides) > 1:
		fetchFn = overrides[1]
		fallthrough
	case len(overrides) > 0:
		maxFn = overrides[0]
	}
	// Fall back to reloadable values so both knobs can be tuned at runtime.
	if maxFn == nil {
		maxFn = reload.FnVal(str.Join("paginationCache-", name, "-maxNum"), maxNum, nil)
	}
	if fetchFn == nil {
		fetchFn = reload.FnVal(str.Join("paginationCache-", name, "-fetchNum"), fetchNum, nil)
	}
	return cache.NewPagination(m, maxFn, dbFn, localFn, dbKeyFn, fetchFn, name)
}
func NewMapCache[K comparable, V any](data cache.Cache[K, V], batchFn cache.MapBatchFn[K, V], fn cache.MapSingleFn[K, V], args ...any) *cache.MapCache[K, V] {
inc := helper.ParseArgs((*cache.IncreaseUpdate[K, V])(nil), args...)
m := cache.NewMapCache[K, V](data, fn, batchFn, inc)
@ -213,6 +233,7 @@ func ClearPush(c ...clearExpired) {
}
func ClearExpired() {
ctx := context.WithValue(context.Background(), "execClearExpired", "mangerClearExpiredFn")
for _, c := range clears {
c.ClearExpired(ctx)
}

View File

@ -13,6 +13,8 @@ import (
"time"
)
var ctx = context.Background()
func TestFlushMapVal(t *testing.T) {
_ = number.Range(1, 5, 0)
t.Run("t1", func(t *testing.T) {

145
cache/pagination.go vendored Normal file
View File

@ -0,0 +1,145 @@
package cache
import (
"context"
"errors"
"fmt"
"github.com/fthvgb1/wp-go/helper"
"github.com/fthvgb1/wp-go/helper/number"
str "github.com/fthvgb1/wp-go/helper/strings"
"github.com/fthvgb1/wp-go/safety"
"strings"
"time"
)
// Pagination caches paginated query results on top of a MapCache.
// While a key's total raw row count stays below maxNum, the full data set is
// cached once under the key and individual pages are sliced out locally via
// localFn; once the total reaches maxNum the key is switched to per-page
// database queries via dbFn.
type Pagination[K comparable, V any] struct {
	*MapCache[string, helper.PaginationData[V]]
	maxNum   func() int           // threshold at which a key switches from local slicing to per-page DB queries
	isSwitch *safety.Map[K, bool] // keys already switched to the per-page DB path
	// dbFn loads one page for k from the database; returns rows and the total raw count.
	dbFn func(ctx context.Context, k K, page, limit, totalRaw int, a ...any) ([]V, int, error)
	// localFn slices one page out of the cached full data set.
	localFn       func(ctx context.Context, data []V, k K, page, limit int, a ...any) ([]V, int, error)
	batchFetchNum func() int // batch size used when pulling the whole data set from the DB
	// dbKeyFn builds the cache key for the per-page DB path.
	// NOTE(review): the parameter is named K, shadowing the type parameter — legal but confusing.
	dbKeyFn func(K K, a ...any) string
	name    string // used in diagnostics (timeout messages)
}
// switchDb is a sentinel returned by paginationByLocal when the data set has
// grown past maxNum and the caller should fall back to per-page DB queries.
var switchDb = errors.New("switch Db")

// DbFn loads one page (page/limit) for key k from the database and returns
// the rows together with the total raw row count. totalRaw passes a
// previously known total through to the query, when available.
type DbFn[K comparable, V any] func(ctx context.Context, k K, page, limit, totalRaw int, a ...any) ([]V, int, error)

// LocalFn slices/filters an already cached full data set down to one page.
type LocalFn[K comparable, V any] func(ctx context.Context, data []V, k K, page, limit int, a ...any) ([]V, int, error)
// NewPagination builds a Pagination on top of m. maxNum is the threshold at
// which a key switches from locally sliced pages to per-page DB queries,
// batchFetchNum the batch size used to prime the local cache, and name is
// used in diagnostics. When dbKeyFn is nil a default key — all values joined
// with "|" — is used for the per-page DB path.
func NewPagination[K comparable, V any](m *MapCache[string, helper.PaginationData[V]], maxNum func() int, dbFn DbFn[K, V], localFn LocalFn[K, V], dbKeyFn func(K, ...any) string, batchFetchNum func() int, name string) *Pagination[K, V] {
	if dbKeyFn == nil {
		// Default key: "%v|%v|...|%v" over k and the extra args, without the
		// trailing separator.
		dbKeyFn = func(key K, args ...any) string {
			b := str.NewBuilder()
			b.Sprintf("%v|", key)
			for _, arg := range args {
				b.Sprintf("%v|", arg)
			}
			return strings.TrimRight(b.String(), "|")
		}
	}
	p := Pagination[K, V]{
		MapCache:      m,
		maxNum:        maxNum,
		isSwitch:      safety.NewMap[K, bool](),
		dbFn:          dbFn,
		localFn:       localFn,
		batchFetchNum: batchFetchNum,
		dbKeyFn:       dbKeyFn,
		name:          name,
	}
	return &p
}
// Pagination returns one page of data for k. Keys whose total row count is
// below the configured maximum are served from a locally cached full data
// set; larger keys are queried from the database page by page (and remembered
// in isSwitch so later calls skip the local path up front).
func (p *Pagination[K, V]) Pagination(ctx context.Context, timeout time.Duration, k K, page, limit int, a ...any) ([]V, int, error) {
	if switched, _ := p.isSwitch.Load(k); switched {
		return p.paginationByDB(ctx, timeout, k, page, limit, 0, a...)
	}
	data, total, err := p.paginationByLocal(ctx, timeout, k, page, limit, a...)
	switch {
	case err == nil:
		return data, total, nil
	case errors.Is(err, switchDb):
		// The data set outgrew the local cache; fall back to the DB path,
		// forwarding whatever total the local path already learned.
		return p.paginationByDB(ctx, timeout, k, page, limit, total, a...)
	default:
		return nil, 0, err
	}
}
// paginationByLocal serves page/limit for k from a locally cached full data
// set, loading it from the DB (in batches of batchFetchNum) on a cache miss.
// It returns the switchDb sentinel — together with the known total — when the
// data set has reached maxNum, signalling the caller to use paginationByDB.
func (p *Pagination[K, V]) paginationByLocal(ctx context.Context, timeout time.Duration, k K, page, limit int, a ...any) ([]V, int, error) {
	key := fmt.Sprintf("%v", k)
	data, ok := p.Get(ctx, key)
	if ok {
		if p.increaseUpdate != nil && p.refresh != nil {
			dat, err := p.increaseUpdates(ctx, timeout, data, key, a...)
			if err != nil {
				return nil, 0, err
			}
			if dat.TotalRaw >= p.maxNum() {
				// Remember the switch and report the known total, matching the
				// miss path below (previously neither was done here).
				p.isSwitch.Store(k, true)
				return nil, dat.TotalRaw, switchDb
			}
			data = dat
		}
		return p.localFn(ctx, data.Data, k, page, limit, a...)
	}
	batchNum := p.batchFetchNum()
	da, totalRaw, err := p.fetchDb(ctx, timeout, k, 1, 0, 0, a...)
	if err != nil {
		return nil, 0, err
	}
	if totalRaw >= p.maxNum() {
		p.isSwitch.Store(k, true)
		return nil, totalRaw, switchDb
	}
	if len(da) < totalRaw {
		// The first query returned only part of the data set; refetch it in
		// batches. Start from an empty slice so the partial first result is
		// not duplicated by the page-1 batch.
		da = da[:0]
		totalPage := number.DivideCeil(totalRaw, batchNum)
		for i := 1; i <= totalPage; i++ {
			daa, _, err := p.fetchDb(ctx, timeout, k, i, batchNum, totalRaw, a...)
			if err != nil {
				return nil, 0, err
			}
			da = append(da, daa...)
		}
	}
	// Cache the full data set, then slice the requested page out of it.
	// (Previously the zero-value data.Data was sliced — always empty — and
	// the fetched rows were never cached, so every call re-hit the DB.)
	data.Data = da
	data.TotalRaw = totalRaw
	p.Set(ctx, key, data)
	return p.localFn(ctx, data.Data, k, page, limit, a...)
}
// paginationByDB serves page/limit for k straight from the database, caching
// each page under a key derived from k, page, limit and the extra args.
func (p *Pagination[K, V]) paginationByDB(ctx context.Context, timeout time.Duration, k K, page, limit, totalRaw int, a ...any) ([]V, int, error) {
	key := p.dbKeyFn(k, append([]any{page, limit}, a...)...)
	if cached, ok := p.Get(ctx, key); ok {
		return cached.Data, cached.TotalRaw, nil
	}
	rows, total, err := p.fetchDb(ctx, timeout, k, page, limit, totalRaw, a...)
	if err != nil {
		return nil, 0, err
	}
	entry := helper.PaginationData[V]{Data: rows, TotalRaw: total}
	p.Set(ctx, key, entry)
	return entry.Data, entry.TotalRaw, nil
}
// fetchDb runs dbFn for one page, bounded by timeout when timeout > 0. An
// error from dbFn itself takes precedence; the timeout error is reported only
// when dbFn did not already fail.
func (p *Pagination[K, V]) fetchDb(ctx context.Context, timeout time.Duration, k K, page, limit, totalRaw int, a ...any) ([]V, int, error) {
	var rows []V
	var total int
	var err error
	query := func() {
		da, tot, e := p.dbFn(ctx, k, page, limit, totalRaw, a...)
		if e != nil {
			err = e
			return
		}
		rows, total = da, tot
	}
	if timeout <= 0 {
		query()
		return rows, total, err
	}
	timeoutErr := helper.RunFnWithTimeout(ctx, timeout, query, fmt.Sprintf("fetch %s-[%v]-page[%d]-limit[%d] from db fail", p.name, k, page, limit))
	if err == nil && timeoutErr != nil {
		err = timeoutErr
	}
	return rows, total, err
}

6
helper/others.go Normal file
View File

@ -0,0 +1,6 @@
package helper
// PaginationData bundles one cached data set (or one cached page) together
// with the total raw row count it belongs to.
type PaginationData[T any] struct {
	Data     []T // the cached rows
	TotalRaw int // total number of rows before paging
}

View File

@ -24,7 +24,7 @@ func (c count[T]) Table() string {
}
func pagination[T Model](db dbQuery, ctx context.Context, q *QueryCondition, page, pageSize int) (r []T, total int, err error) {
if page < 1 || pageSize < 1 {
if page < 1 {
return
}
q.Limit = pageSize
@ -65,7 +65,9 @@ func pagination[T Model](db dbQuery, ctx context.Context, q *QueryCondition, pag
} else {
total = q.TotalRow
}
if q.Limit <= 0 {
return
}
offset := 0
if page > 1 {
offset = (page - 1) * q.Limit