improve cache interface and memorymapcache.go implementation

xing 2023-10-27 20:51:46 +08:00
parent f0c1744998
commit acb064b762
10 changed files with 155 additions and 130 deletions

View File

@@ -34,12 +34,12 @@ func Flush() {
}
}
func FlushMapVal[T any](name string, key T) {
func FlushMapVal[T any](name string, keys ...T) {
v, ok := mapFlush.Load(name)
if !ok {
if !ok || len(keys) < 1 {
return
}
v(key)
v(keys)
}
func FlushAnyVal(name ...string) {
@@ -58,9 +58,9 @@ func pushFlushMap[K comparable, V any](m *cache.MapCache[K, V], args ...any) {
m.Flush(ctx)
})
mapFlush.Store(name, func(a any) {
k, ok := a.(K)
if ok {
m.Delete(ctx, k)
k, ok := a.([]K)
if ok && len(k) > 0 {
m.Del(ctx, k...)
}
})
getSingleFn.Store(name, func(ct context.Context, k any, t time.Duration, a ...any) (any, error) {
@@ -119,9 +119,8 @@ func parseArgs(args ...any) string {
return name
}
func NewMapCache[K comparable, V any](data cache.Cache[K, V], batchFn cache.MapBatchFn[K, V],
fn cache.MapSingleFn[K, V], expireTime time.Duration, args ...any) *cache.MapCache[K, V] {
m := cache.NewMapCache[K, V](data, fn, batchFn, expireTime)
func NewMapCache[K comparable, V any](data cache.Cache[K, V], batchFn cache.MapBatchFn[K, V], fn cache.MapSingleFn[K, V], args ...any) *cache.MapCache[K, V] {
m := cache.NewMapCache[K, V](data, fn, batchFn)
pushFlushMap(m, args...)
FlushPush(m)
ClearPush(m)
@@ -129,7 +128,7 @@ func NewMapCache[K comparable, V any](data cache.Cache[K, V], batchFn cache.MapB
}
func NewMemoryMapCache[K comparable, V any](batchFn cache.MapBatchFn[K, V],
fn cache.MapSingleFn[K, V], expireTime time.Duration, args ...any) *cache.MapCache[K, V] {
return NewMapCache[K, V](cache.NewMemoryMapCache[K, V](), batchFn, fn, expireTime, args...)
return NewMapCache[K, V](cache.NewMemoryMapCache[K, V](expireTime), batchFn, fn, args...)
}
func FlushPush(f ...flush) {
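For reference, a minimal in-package sketch of how the reworked constructors are called after this change (hedged: the "demo" name, the int/string types, the fetch logic and ctx are illustrative, not taken from the repository). The expire time now travels into cache.NewMemoryMapCache instead of living on the MapCache wrapper, and FlushMapVal accepts any number of keys:

// Assumed usage, written as if inside package cachemanager like the new test below.
func exampleUsage(ctx context.Context) {
	m := NewMemoryMapCache[int, string](
		func(_ context.Context, ids []int, _ ...any) (map[int]string, error) {
			r := make(map[int]string, len(ids))
			for _, id := range ids {
				r[id] = fmt.Sprintf("item-%d", id) // stand-in for a real lookup
			}
			return r, nil
		}, nil, time.Minute, "demo")

	one, _ := m.GetCache(ctx, 1, time.Second)                    // single key, served through the batch fn
	many, _ := m.GetCacheBatch(ctx, []int{1, 2, 3}, time.Second) // batch lookup
	FlushMapVal("demo", 1, 2)                                    // evict several keys in one call
	fmt.Println(one, many)
}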

View File

@@ -0,0 +1,55 @@
package cachemanager
import (
"context"
"fmt"
"github.com/fthvgb1/wp-go/helper/number"
"github.com/fthvgb1/wp-go/taskPools"
"testing"
"time"
)
func TestFlushMapVal(t *testing.T) {
_ = number.Range(1, 5, 0)
t.Run("t1", func(t *testing.T) {
count := 0
vv := NewMemoryMapCache(func(ctx2 context.Context, ks []int, a ...any) (map[int]int, error) {
r := make(map[int]int)
for _, k := range ks {
r[k] = k * k
}
count++
return r, nil
}, nil, time.Second, "test")
gets, err := GetMultiple[int]("test", ctx, number.Range(1, 10), time.Second)
if err != nil {
t.Fatal(t, "err:", err)
}
p := taskPools.NewPools(10)
for i := 0; i < 20; i++ {
i := i
p.Execute(func() {
if i%2 == 0 {
vv.Get(ctx, 5)
} else {
vv.Set(ctx, i, i)
}
})
}
p.Wait()
fmt.Println(gets, count)
FlushMapVal("test", 3, 4)
fmt.Println(vv.Get(ctx, 3))
fmt.Println(vv.Get(ctx, 4))
get, err := Get[int]("test", ctx, 3, time.Second)
if err != nil {
t.Fatal(t, "err", err)
}
fmt.Println(get, count)
fmt.Println(vv.Get(ctx, 5))
FlushAnyVal("test")
fmt.Println(vv.Get(ctx, 5))
fmt.Println(vv.Get(ctx, 6))
})
}

View File

@@ -37,9 +37,9 @@ var safetyMapLock = sync.Mutex{}
var flushMapFn = safety.NewMap[string, func(any)]()
func FlushMapVal[T any](namespace string, key T) {
func FlushMapVal[T any](namespace string, key ...T) {
fn, ok := flushMapFn.Load(namespace)
if !ok {
if !ok || len(key) < 1 {
return
}
fn(key)
@@ -89,11 +89,13 @@ func safetyMapFn[K comparable, V, A any](namespace string, args ...any) *safetyM
m = &safetyMap[K, V, A]{safety.NewMap[K, V](), sync.Mutex{}}
ord, _ := parseArgs(args...)
flushMapFn.Store(namespace, func(a any) {
k, ok := a.(K)
if !ok {
k, ok := a.([]K)
if !ok && len(k) > 0 {
return
}
m.val.Delete(k)
for _, key := range k {
m.val.Delete(key)
}
})
Push(func() {
m.val.Flush()

View File

@@ -7,7 +7,9 @@ import (
func TestFlushMapVal(t *testing.T) {
t.Run("t1", func(t *testing.T) {
c := 0
v := GetAnyValMapBy("key", 2, struct{}{}, func(a struct{}) int {
c++
return 33
})
fmt.Println(v)

cache/cache.go vendored

@@ -7,10 +7,11 @@ import (
type Cache[K comparable, V any] interface {
Get(ctx context.Context, key K) (V, bool)
Set(ctx context.Context, key K, val V, expire time.Duration)
Ttl(ctx context.Context, key K, expire time.Duration) time.Duration
Set(ctx context.Context, key K, val V)
GetExpireTime(ctx context.Context) time.Duration
Ttl(ctx context.Context, key K) time.Duration
Ver(ctx context.Context, key K) int
Flush(ctx context.Context)
Delete(ctx context.Context, key K)
ClearExpired(ctx context.Context, expire time.Duration)
Del(ctx context.Context, key ...K)
ClearExpired(ctx context.Context)
}
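Taken together, the reshaped Cache interface makes the expire time a property of the cache itself instead of a per-call argument, replaces Delete with the variadic Del, and drops the duration parameter from ClearExpired. A hedged, in-package usage sketch (the string/int types, the one-minute TTL and ctx are illustrative):

// Assumed usage of the new interface through the in-memory implementation.
var c Cache[string, int] = NewMemoryMapCache[string, int](time.Minute)
c.Set(ctx, "a", 1)       // no per-call expire argument any more
v, ok := c.Get(ctx, "a") // ok turns false once the entry outlives the TTL
ttl := c.Ttl(ctx, "a")   // remaining lifetime, based on the cache's own expire time
c.Del(ctx, "a", "b")     // variadic delete replaces Delete
c.ClearExpired(ctx)      // no duration argument; uses the cache's own expire time
fmt.Println(v, ok, ttl)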

cache/map.go vendored

@@ -10,23 +10,21 @@ import (
)
type MapCache[K comparable, V any] struct {
handle Cache[K, V]
Cache[K, V]
mux sync.Mutex
cacheFunc MapSingleFn[K, V]
batchCacheFn MapBatchFn[K, V]
expireTime time.Duration
}
type MapSingleFn[K, V any] func(context.Context, K, ...any) (V, error)
type MapBatchFn[K comparable, V any] func(context.Context, []K, ...any) (map[K]V, error)
func NewMapCache[K comparable, V any](data Cache[K, V], cacheFunc MapSingleFn[K, V], batchCacheFn MapBatchFn[K, V], expireTime time.Duration) *MapCache[K, V] {
func NewMapCache[K comparable, V any](data Cache[K, V], cacheFunc MapSingleFn[K, V], batchCacheFn MapBatchFn[K, V]) *MapCache[K, V] {
r := &MapCache[K, V]{
handle: data,
Cache: data,
mux: sync.Mutex{},
cacheFunc: cacheFunc,
batchCacheFn: batchCacheFn,
expireTime: expireTime,
}
if cacheFunc == nil && batchCacheFn != nil {
r.setDefaultCacheFn(batchCacheFn)
@@ -41,7 +39,7 @@ func (m *MapCache[K, V]) SetDefaultBatchFunc(fn MapSingleFn[K, V]) {
var err error
rr := make(map[K]V)
for _, id := range ids {
v, er := fn(ctx, id)
v, er := fn(ctx, id, a...)
if er != nil {
err = errors.Join(er)
continue
@@ -55,20 +53,13 @@ func (m *MapCache[K, V]) SetDefaultBatchFunc(fn MapSingleFn[K, V]) {
func (m *MapCache[K, V]) SetCacheFunc(fn MapSingleFn[K, V]) {
m.cacheFunc = fn
}
func (m *MapCache[K, V]) GetHandle() Cache[K, V] {
return m.handle
}
func (m *MapCache[K, V]) Ttl(ctx context.Context, k K) time.Duration {
return m.handle.Ttl(ctx, k, m.expireTime)
}
func (m *MapCache[K, V]) GetLastSetTime(ctx context.Context, k K) (t time.Time) {
tt := m.handle.Ttl(ctx, k, m.expireTime)
tt := m.Ttl(ctx, k)
if tt <= 0 {
return
}
return time.Now().Add(m.handle.Ttl(ctx, k, m.expireTime)).Add(-m.expireTime)
return time.Now().Add(m.Ttl(ctx, k)).Add(-m.GetExpireTime(ctx))
}
func (m *MapCache[K, V]) SetCacheBatchFn(fn MapBatchFn[K, V]) {
@@ -92,58 +83,22 @@ func (m *MapCache[K, V]) setDefaultCacheFn(fn MapBatchFn[K, V]) {
}
}
func NewMapCacheByFn[K comparable, V any](cacheType Cache[K, V], fn MapSingleFn[K, V], expireTime time.Duration) *MapCache[K, V] {
r := &MapCache[K, V]{
mux: sync.Mutex{},
cacheFunc: fn,
expireTime: expireTime,
handle: cacheType,
}
r.SetDefaultBatchFunc(fn)
return r
}
func NewMapCacheByBatchFn[K comparable, V any](cacheType Cache[K, V], fn MapBatchFn[K, V], expireTime time.Duration) *MapCache[K, V] {
r := &MapCache[K, V]{
mux: sync.Mutex{},
batchCacheFn: fn,
expireTime: expireTime,
handle: cacheType,
}
r.setDefaultCacheFn(fn)
return r
}
func (m *MapCache[K, V]) Flush(ctx context.Context) {
m.mux.Lock()
defer m.mux.Unlock()
m.handle.Flush(ctx)
}
func (m *MapCache[K, V]) Get(ctx context.Context, k K) (V, bool) {
return m.handle.Get(ctx, k)
}
func (m *MapCache[K, V]) Set(ctx context.Context, k K, v V) {
m.handle.Set(ctx, k, v, m.expireTime)
}
func (m *MapCache[K, V]) Delete(ctx context.Context, k K) {
m.handle.Delete(ctx, k)
}
func (m *MapCache[K, V]) Ver(ctx context.Context, k K) int {
return m.handle.Ver(ctx, k)
m.Cache.Flush(ctx)
}
func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duration, params ...any) (V, error) {
data, ok := m.handle.Get(c, key)
data, ok := m.Get(c, key)
var err error
if !ok || m.handle.Ttl(c, key, m.expireTime) <= 0 {
ver := m.handle.Ver(c, key)
if !ok || m.Ttl(c, key) <= 0 {
ver := m.Ver(c, key)
call := func() {
m.mux.Lock()
defer m.mux.Unlock()
if m.handle.Ver(c, key) > ver {
data, _ = m.handle.Get(c, key)
if m.Ver(c, key) > ver {
data, _ = m.Get(c, key)
return
}
data, err = m.cacheFunc(c, key, params...)
@@ -177,10 +132,10 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
var res []V
ver := 0
needFlush := slice.FilterAndMap(key, func(k K) (r K, ok bool) {
if _, ok := m.handle.Get(c, k); !ok {
if _, ok := m.Get(c, k); !ok {
return k, true
}
ver += m.handle.Ver(c, k)
ver += m.Ver(c, k)
return
})
@@ -191,7 +146,7 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
defer m.mux.Unlock()
vers := slice.Reduce(needFlush, func(t K, r int) int {
return r + m.handle.Ver(c, t)
return r + m.Ver(c, t)
}, 0)
if vers > ver {
@@ -225,13 +180,7 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
}
}
res = slice.FilterAndMap(key, func(k K) (V, bool) {
return m.handle.Get(c, k)
return m.Get(c, k)
})
return res, err
}
func (m *MapCache[K, V]) ClearExpired(ctx context.Context) {
m.mux.Lock()
defer m.mux.Unlock()
m.handle.ClearExpired(ctx, m.expireTime)
}
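Since MapCache now embeds Cache[K, V], the old wrapper methods (Get, Set, Delete, Ver, the Ttl forwarder and GetHandle) and the NewMapCacheByFn/NewMapCacheByBatchFn constructors are gone; the embedded cache's methods are promoted and NewMapCache no longer takes an expire time. A hedged in-package sketch of what that means for a caller (the types, the loader and ctx are illustrative):

// Assumed usage: a MapCache over the in-memory cache with a single-key loader.
m := NewMapCache[string, int](NewMemoryMapCache[string, int](time.Minute),
	func(_ context.Context, k string, _ ...any) (int, error) {
		return len(k), nil // stand-in loader
	}, nil)

m.Set(ctx, "answer", 42)                // promoted from the embedded Cache
v, ok := m.Get(ctx, "answer")           // promoted as well
last := m.GetLastSetTime(ctx, "answer") // now derived from Ttl and GetExpireTime
fmt.Println(v, ok, last, m.Ttl(ctx, "answer"))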

cache/map_test.go vendored

@@ -353,12 +353,12 @@ func TestMapCache_Ttl(t *testing.T) {
name: "t1",
m: ca,
args: args[string]{ct, "aa"},
want: ca.expireTime - tx.Sub(txx),
want: ca.GetExpireTime(ct) - tx.Sub(txx),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fmt.Printf("过期时间=%v \nttl=%v \n当前时间 =%v\n最后设置时间=%v\n当时时间-最后设置时间=%v ", ca.expireTime, ca.Ttl(ct, "aa"), tx, txx, tx.Sub(txx))
fmt.Printf("过期时间=%v \nttl=%v \n当前时间 =%v\n最后设置时间=%v\n当时时间-最后设置时间=%v ", ca.GetExpireTime(ct), ca.Ttl(ct, "aa"), tx, txx, tx.Sub(txx))
if got := tt.m.Ttl(tt.args.ct, tt.args.k); got != tt.want {
t.Errorf("Ttl() = %v, want %v", got, tt.want)
}

View File

@@ -9,19 +9,22 @@ import (
type MemoryMapCache[K comparable, V any] struct {
*safety.Map[K, mapVal[V]]
expireTime time.Duration
}
func NewMemoryMapCacheByFn[K comparable, V any](fn MapSingleFn[K, V], expireTime time.Duration) *MapCache[K, V] {
return &MapCache[K, V]{
handle: NewMemoryMapCache[K, V](),
Cache: NewMemoryMapCache[K, V](expireTime),
cacheFunc: fn,
expireTime: expireTime,
mux: sync.Mutex{},
}
}
func NewMemoryMapCache[K comparable, V any]() *MemoryMapCache[K, V] {
return &MemoryMapCache[K, V]{Map: safety.NewMap[K, mapVal[V]]()}
func NewMemoryMapCache[K comparable, V any](expireTime time.Duration) *MemoryMapCache[K, V] {
return &MemoryMapCache[K, V]{
Map: safety.NewMap[K, mapVal[V]](),
expireTime: expireTime,
}
}
type mapVal[T any] struct {
@@ -30,15 +33,24 @@ type mapVal[T any] struct {
data T
}
func (m *MemoryMapCache[K, V]) GetExpireTime(_ context.Context) time.Duration {
return m.expireTime
}
func (m *MemoryMapCache[K, V]) Get(_ context.Context, key K) (r V, ok bool) {
v, ok := m.Load(key)
if ok {
return v.data, true
if !ok {
return
}
r = v.data
t := m.expireTime - time.Now().Sub(v.setTime)
if t <= 0 {
ok = false
}
return
}
func (m *MemoryMapCache[K, V]) Set(_ context.Context, key K, val V, _ time.Duration) {
func (m *MemoryMapCache[K, V]) Set(_ context.Context, key K, val V) {
v, ok := m.Load(key)
t := time.Now()
if ok {
@@ -55,12 +67,12 @@ func (m *MemoryMapCache[K, V]) Set(_ context.Context, key K, val V, _ time.Durat
m.Store(key, v)
}
func (m *MemoryMapCache[K, V]) Ttl(_ context.Context, key K, expire time.Duration) time.Duration {
func (m *MemoryMapCache[K, V]) Ttl(_ context.Context, key K) time.Duration {
v, ok := m.Load(key)
if !ok {
return time.Duration(-1)
}
return expire - time.Now().Sub(v.setTime)
return m.expireTime - time.Now().Sub(v.setTime)
}
func (m *MemoryMapCache[K, V]) Ver(_ context.Context, key K) int {
@@ -75,15 +87,16 @@ func (m *MemoryMapCache[K, V]) Flush(context.Context) {
m.Map.Flush()
}
func (m *MemoryMapCache[K, V]) Delete(_ context.Context, key K) {
func (m *MemoryMapCache[K, V]) Del(_ context.Context, keys ...K) {
for _, key := range keys {
m.Map.Delete(key)
}
}
func (m *MemoryMapCache[K, V]) ClearExpired(_ context.Context, expire time.Duration) {
func (m *MemoryMapCache[K, V]) ClearExpired(_ context.Context) {
now := time.Duration(time.Now().UnixNano())
m.Range(func(k K, v mapVal[V]) bool {
if now > time.Duration(v.setTime.UnixNano())+expire {
if now > time.Duration(v.setTime.UnixNano())+m.expireTime {
m.Map.Delete(k)
}
return true
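The in-memory implementation now carries its TTL from construction: Get reports ok=false once an entry is older than the configured expire time, Ttl no longer needs the duration passed in, Del deletes any number of keys, and ClearExpired takes only a context. A hedged sketch (the 100ms TTL, the key names and ctx are illustrative):

// Assumed in-package usage showing the new expiry behaviour.
mc := NewMemoryMapCache[string, string](100 * time.Millisecond)
mc.Set(ctx, "k", "v")
_, ok := mc.Get(ctx, "k") // true while the entry is inside the TTL window
time.Sleep(150 * time.Millisecond)
_, ok = mc.Get(ctx, "k")  // false once the entry is older than the expire time
mc.ClearExpired(ctx)      // drops expired entries using the stored TTL
mc.Del(ctx, "k", "other") // variadic delete
fmt.Println(ok)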

View File

@@ -15,7 +15,7 @@ var ttt time.Time
func init() {
ctx = context.Background()
mm = *NewMemoryMapCache[string, string]()
mm = *NewMemoryMapCache[string, string](3 * time.Second)
ttt = time.Now()
mm.Store("aa", mapVal[string]{
setTime: ttt,
@@ -53,7 +53,7 @@ func TestMemoryMapCache_ClearExpired(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fmt.Println(tt.m)
tt.m.ClearExpired(tt.args.in0, tt.args.expire)
tt.m.ClearExpired(tt.args.in0)
time.Sleep(time.Second)
fmt.Println(tt.m)
})
@@ -83,7 +83,7 @@ func TestMemoryMapCache_Delete(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
fmt.Println(mm.Get(ctx, "aa"))
tt.m.Delete(tt.args.in0, tt.args.key)
tt.m.Del(tt.args.in0, tt.args.key)
fmt.Println(mm.Get(ctx, "aa"))
})
@@ -111,7 +111,7 @@ func TestMemoryMapCache_Flush(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.m.Flush(tt.args.in0)
mm.Set(ctx, "aa", "xx", time.Second)
mm.Set(ctx, "aa", "xx")
fmt.Println(mm.Get(ctx, "aa"))
})
}
@@ -180,7 +180,7 @@ func TestMemoryMapCache_Set(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tt.m.Set(tt.args.in0, tt.args.key, tt.args.val, tt.args.in3)
tt.m.Set(tt.args.in0, tt.args.key, tt.args.val)
fmt.Println(tt.m.Get(ctx, tt.args.key))
})
}
@@ -209,7 +209,7 @@ func TestMemoryMapCache_Ttl(t *testing.T) {
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.m.Ttl(tt.args.in0, tt.args.key, tt.args.expire); got != tt.want {
if got := tt.m.Ttl(tt.args.in0, tt.args.key); got != tt.want {
t.Errorf("Ttl() = %v, want %v", got, tt.want)
}
})
@@ -227,7 +227,7 @@ func TestMemoryMapCache_Ver(t *testing.T) {
args args[K]
want int
}
mm.Set(ctx, "aa", "ff", time.Second)
mm.Set(ctx, "aa", "ff")
tests := []testCase[string, string]{
{
name: "t1",

View File

@@ -10,21 +10,25 @@ import (
"strconv"
)
func Range[T constraints.Integer](start, end, step T) []T {
if step == 0 {
panic("step can't be 0")
func Range[T constraints.Integer](start, end T, steps ...T) []T {
step := T(1)
if len(steps) > 0 {
step = steps[0]
}
var l int
if step == 0 {
l = int(end - start + 1)
} else {
l = int((end-start+1)/step + 1)
}
l := int((end-start+1)/step + 1)
if l < 0 {
l = 0 - l
l = -l
}
r := make([]T, 0, l)
for i := start; ; {
r = append(r, i)
i = i + step
if (step > 0 && i > end) || (step < 0 && i < end) {
break
}
gap := start
for i := 0; i < l; i++ {
r = append(r, gap)
gap += step
}
return r
}
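In short, Range keeps its start and end parameters, but the step is now an optional variadic argument that defaults to 1, and a step of 0 no longer panics. A hedged sketch of the calls as they appear to callers such as the cachemanager test above:

// Assumed calls against the new signature.
_ = number.Range(1, 10)    // step omitted, defaults to 1
_ = number.Range(1, 10, 3) // explicit step: 1, 4, 7, 10
_ = number.Range(1, 5, 0)  // zero step is now accepted instead of panicking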
@@ -36,23 +40,23 @@ func Rand[T constraints.Integer](start, end T) T {
}
func Min[T constraints.Integer | constraints.Float](a ...T) T {
min := a[0]
mins := a[0]
for _, t := range a {
if min > t {
min = t
if mins > t {
mins = t
}
}
return min
return mins
}
func Max[T constraints.Integer | constraints.Float](a ...T) T {
max := a[0]
maxs := a[0]
for _, t := range a {
if max < t {
max = t
if maxs < t {
maxs = t
}
}
return max
return maxs
}
func Sum[T constraints.Integer | constraints.Float](a ...T) T {