xing 2022-10-13 15:59:43 +08:00
parent 1b4175a440
commit 29a133e753
5 changed files with 26 additions and 9 deletions


@@ -22,7 +22,6 @@ var monthPostsCache *cache.MapCache[string, []uint64]
 var postListIdsCache *cache.MapCache[string, PostIds]
 var searchPostIdsCache *cache.MapCache[string, PostIds]
 var maxPostIdCache *cache.SliceCache[uint64]
-var TotalRaw int
 var usersCache *cache.MapCache[uint64, models.WpUsers]
 var commentsCache *cache.MapCache[uint64, models.WpComments]


@@ -80,7 +80,7 @@ func getPostsByIds(ids ...any) (m map[uint64]models.WpPosts, err error) {
 }

 func PostLists(ctx context.Context, key string, args ...any) (r []models.WpPosts, total int, err error) {
-	ids, err := postListIdsCache.GetCache(ctx, key, time.Second, args...)
+	ids, err := postListIdsCache.GetCache(ctx, key, time.Hour, args...)
 	if err != nil {
 		return
 	}
@@ -102,9 +102,6 @@ func searchPostIds(args ...any) (ids PostIds, err error) {
 		ids.Ids = append(ids.Ids, posts.Id)
 	}
 	ids.Length = total
-	if total > TotalRaw {
-		TotalRaw = total
-	}
 	return
 }


@@ -143,7 +143,7 @@ func (h *indexHandle) parseParams() {
 			h.page = pa
 		}
 	}
-	if common.TotalRaw > 0 && common.TotalRaw < (h.page-1)*h.pageSize {
+	if models.TotalRaw > 0 && models.TotalRaw < (h.page-1)*h.pageSize {
 		h.page = 1
 	}
 	if h.page > 1 && (h.category != "" || h.search != "" || month != "") {

cache/map.go

@@ -10,7 +10,7 @@ import (

 type MapCache[K comparable, V any] struct {
 	data         map[K]mapCacheStruct[V]
-	mutex        *sync.Mutex
+	mutex        *sync.RWMutex
 	cacheFunc    func(...any) (V, error)
 	batchCacheFn func(...any) (map[K]V, error)
 	expireTime   time.Duration
@@ -59,7 +59,7 @@ func (m *MapCache[K, V]) setCacheFn(fn func(...any) (map[K]V, error)) {

 func NewMapCacheByFn[K comparable, V any](fn func(...any) (V, error), expireTime time.Duration) *MapCache[K, V] {
 	return &MapCache[K, V]{
-		mutex:      &sync.Mutex{},
+		mutex:      &sync.RWMutex{},
 		cacheFunc:  fn,
 		expireTime: expireTime,
 		data:       make(map[K]mapCacheStruct[V]),
@@ -67,7 +67,7 @@ func NewMapCacheByFn[K comparable, V any](fn func(...any) (V, error), expireTime
 }

 func NewMapCacheByBatchFn[K comparable, V any](fn func(...any) (map[K]V, error), expireTime time.Duration) *MapCache[K, V] {
 	r := &MapCache[K, V]{
-		mutex:        &sync.Mutex{},
+		mutex:        &sync.RWMutex{},
		batchCacheFn: fn,
 		expireTime:   expireTime,
 		data:         make(map[K]mapCacheStruct[V]),
@@ -83,6 +83,8 @@ func (m *MapCache[K, V]) Flush() {
 }

 func (m *MapCache[K, V]) Get(k K) V {
+	m.mutex.RLock()
+	defer m.mutex.RUnlock()
 	return m.data[k].data
 }
@@ -123,6 +125,7 @@ func (m *MapCache[K, V]) set(k K, v V) {
 }

 func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duration, params ...any) (V, error) {
+	m.mutex.RLock()
 	data, ok := m.data[key]
 	if !ok {
 		data = mapCacheStruct[V]{}
@@ -132,6 +135,7 @@ func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duratio
 	expired := time.Duration(data.setTime.UnixNano())+m.expireTime < now
 	// todo: should check here whether the fetched value is the zero value, but how?
 	if !ok || (ok && m.expireTime >= 0 && expired) {
+		m.mutex.RUnlock()
 		t := data.incr
 		call := func() {
 			m.mutex.Lock()
@@ -167,10 +171,14 @@ func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duratio
 		}
 	}

+	m.mutex.TryRLock()
+	m.mutex.RUnlock()
 	return data.data, err
 }

 func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.Duration, params ...any) ([]V, error) {
+	m.mutex.RLock()
 	var needFlush []K
 	var res []V
 	t := 0
@@ -190,6 +198,7 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 	var err error
 	// todo: should check here whether the fetched value is the zero value, but how?
 	if len(needFlush) > 0 {
+		m.mutex.RUnlock()
 		call := func() {
 			m.mutex.Lock()
 			defer m.mutex.Unlock()
@@ -232,6 +241,8 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 		d := m.data[k]
 		res = append(res, d.data)
 	}
+	m.mutex.TryRLock()
+	m.mutex.RUnlock()
 	return res, err
 }
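
Note: the cache/map.go changes above switch MapCache from a plain sync.Mutex to a sync.RWMutex so concurrent readers no longer serialize behind one another. Below is a minimal, self-contained sketch of that read-through pattern with an RWMutex. It is an illustration only: Cache, NewCache, and load are assumed names for the example, and it leaves out the repository's expiry, batch loading, and context handling.

package main

import (
	"fmt"
	"sync"
)

// Cache is a simplified generic read-through cache guarded by an RWMutex:
// reads take the shared lock, and only a miss upgrades to the exclusive
// lock to populate the entry.
type Cache[K comparable, V any] struct {
	mu   sync.RWMutex
	data map[K]V
	load func(K) (V, error) // loader invoked on a cache miss
}

func NewCache[K comparable, V any](load func(K) (V, error)) *Cache[K, V] {
	return &Cache[K, V]{data: make(map[K]V), load: load}
}

func (c *Cache[K, V]) Get(k K) (V, error) {
	// Fast path: shared lock, so concurrent readers do not block each other.
	c.mu.RLock()
	v, ok := c.data[k]
	c.mu.RUnlock()
	if ok {
		return v, nil
	}
	// Slow path: an RWMutex cannot be upgraded in place, so release the read
	// lock, take the write lock, and re-check before loading.
	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok = c.data[k]; ok {
		return v, nil
	}
	v, err := c.load(k)
	if err != nil {
		return v, err
	}
	c.data[k] = v
	return v, nil
}

func main() {
	c := NewCache(func(k string) (int, error) { return len(k), nil })
	n, _ := c.Get("hello")
	fmt.Println(n) // 5
}

The re-check after taking the write lock matters because another goroutine may have filled the entry between RUnlock and Lock.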


@@ -3,6 +3,7 @@ package models
 var Options = make(map[string]string)
 var Terms = map[uint64]WpTerms{}
 var TermTaxonomy = map[uint64]WpTermTaxonomy{}
+var TotalRaw int

 func InitOptions() error {
 	ops, err := SimpleFind[WpOptions](SqlBuilder{{"autoload", "yes"}}, "option_name, option_value")
@@ -36,5 +37,14 @@ func InitTerms() (err error) {
 	for _, taxonomy := range termTax {
 		TermTaxonomy[taxonomy.TermTaxonomyId] = taxonomy
 	}
+	r, err := Find[WpPosts](SqlBuilder{
+		{"post_type", "in", ""},
+		{"post_status", "in", ""},
+	}, "count(*) ID", "", nil, nil, 0, []any{"post"}, []any{"publish"})
+	if err != nil {
+		panic(err)
+	}
+	TotalRaw = int(r[0].Id)
 	return
 }
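
Note: taken together, the last two files move TotalRaw out of the request path: InitTerms counts published posts once at startup and stores the result in models.TotalRaw, and the index handler clamps the requested page against it. A rough sketch of that flow, where clampPage and the literal count are hypothetical stand-ins rather than the repository's API:

package main

import "fmt"

// TotalRaw stands in for models.TotalRaw: the number of published posts,
// counted once at startup instead of being tracked per search request.
var TotalRaw int

// clampPage mirrors the parseParams check in the diff above: if the requested
// page starts beyond the known total, fall back to the first page.
func clampPage(page, pageSize int) int {
	if TotalRaw > 0 && TotalRaw < (page-1)*pageSize {
		return 1
	}
	return page
}

func main() {
	// Assume the startup count(*) query found 25 published posts.
	TotalRaw = 25
	fmt.Println(clampPage(2, 10)) // 2: page 2 still has rows
	fmt.Println(clampPage(9, 10)) // 1: past the end, reset to page 1
}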