Merge pull request #11 from fthvgb1/dev

Dev
This commit is contained in:
2023-02-03 21:57:09 +08:00 committed by GitHub
commit 0155d29f77
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 35 additions and 28 deletions

cache/map.go vendored (3 changed lines)

@@ -152,8 +152,7 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
     defer m.mux.Unlock()
     vers := slice.Reduce(needFlush, func(t K, r int) int {
-        r += m.data.Ver(c, t)
-        return r
+        return r + m.data.Ver(c, t)
     }, 0)
     if vers > ver {
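The change above folds the two-statement accumulator body (r += ...; return r) into a single return r + m.data.Ver(c, t). For reference, a minimal self-contained sketch of the same fold pattern; the local reduce helper is only written to match the call shape visible in this hunk, it is not the repository's slice.Reduce:

package main

import "fmt"

// reduce is a local stand-in shaped after the call above: it folds each
// element into an accumulator and returns the final value.
func reduce[T any, R any](s []T, fn func(t T, r R) R, init R) R {
    r := init
    for _, t := range s {
        r = fn(t, r)
    }
    return r
}

func main() {
    // Summing per-key versions, as GetCacheBatch does with m.data.Ver(c, t).
    versions := map[string]int{"xx": 1, "oo": 2, "aa": 3} // hypothetical version table
    needFlush := []string{"xx", "oo", "aa"}
    vers := reduce(needFlush, func(t string, r int) int {
        return r + versions[t]
    }, 0)
    fmt.Println(vers) // 6
}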

cache/map_test.go vendored (32 changed lines)

@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "github.com/fthvgb1/wp-go/helper/slice"
+    "github.com/fthvgb1/wp-go/taskPools"
     "reflect"
     "strings"
     "testing"
@@ -22,9 +23,10 @@ func init() {
     }
     ct = context.Background()
     batchFn = func(a ...any) (map[string]string, error) {
+        fmt.Println(a)
         arr := a[1].([]string)
-        return slice.SimpleToMap(arr, func(t string) string {
-            return strings.Repeat(t, 2)
+        return slice.FilterAndToMap(arr, func(t string) (string, string, bool) {
+            return t, strings.Repeat(t, 2), true
         }), nil
     }
     ca = *NewMemoryMapCacheByFn[string, string](fn, time.Second*2)
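The init hook now builds the batch result with slice.FilterAndToMap instead of slice.SimpleToMap. Judging only from the callback signature in the hunk, the new callback returns a map key, a map value, and a keep flag, so each requested string still maps to its doubled form. A small self-contained sketch of that behaviour; filterAndToMap is a hypothetical local helper shaped after the call above, not the repository's implementation:

package main

import (
    "fmt"
    "strings"
)

// filterAndToMap mirrors the callback shape used above: for each element it
// asks for (key, value, keep) and only stores entries whose flag is true.
func filterAndToMap[T any, K comparable, V any](s []T, fn func(t T) (K, V, bool)) map[K]V {
    m := make(map[K]V, len(s))
    for _, t := range s {
        if k, v, ok := fn(t); ok {
            m[k] = v
        }
    }
    return m
}

func main() {
    arr := []string{"xx", "oo", "aa"}
    got := filterAndToMap(arr, func(t string) (string, string, bool) {
        return t, strings.Repeat(t, 2), true // key, value, keep
    })
    fmt.Println(got) // map[aa:aaaa oo:oooo xx:xxxx]
}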
@@ -190,14 +192,36 @@ func TestMapCache_GetCacheBatch(t *testing.T) {
                c:       ct,
                key:     []string{"xx", "oo"},
                timeout: time.Second,
-                params:  []any{ct, []string{"xx", "oo"}},
+                params:  []any{ct, []string{"xx", "oo", "aa"}},
            },
-            want:    []string{"xxxx", "oooo"},
+            want:    []string{"xxxx", "oooo", "aaaa"},
            wantErr: false,
        },
    }
+    time.Sleep(2 * time.Second)
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
+            p := taskPools.NewPools(10)
+            for i := 0; i < 800000; i++ {
+                p.Execute(func() {
+                    c := context.Background()
+                    //time.Sleep(time.Millisecond * number.Rand[time.Duration](200, 400))
+                    a, err := ca.GetCacheBatch(c, []string{"xx", "oo", "aa"}, time.Hour, c, []string{"xx", "oo", "aa"})
+                    if err != nil {
+                        panic(err)
+                        return
+                    }
+                    if a[0] == "xxxx" && a[1] == "oooo" && a[2] == "aaaa" {
+                    } else {
+                        fmt.Println(a)
+                        panic("xxx")
+                    }
+                    //fmt.Println(x)
+                })
+            }
+            p.Wait()
            got, err := tt.m.GetCacheBatch(tt.args.c, tt.args.key, tt.args.timeout, tt.args.params...)
            if (err != nil) != tt.wantErr {
                t.Errorf("GetCacheBatch() error = %v, wantErr %v", err, tt.wantErr)

(third changed file)

@@ -64,17 +64,9 @@ func ChunkFind[T Model](ctx context.Context, perLimit int, q *QueryCondition) (r
        if 1 == i {
            rr, total, err = SimplePagination[T](ctx, q.where, q.fields, q.group, i, perLimit, q.order, q.join, q.having, q.in...)
        } else {
-            rr, err = Finds[T](ctx, Conditions(
-                Where(q.where),
-                Fields(q.fields),
-                Group(q.group),
-                Having(q.having),
-                Join(q.join),
-                Order(q.order),
-                Offset(offset),
-                Limit(perLimit),
-                In(q.in...),
-            ))
+            q.offset = offset
+            q.limit = perLimit
+            rr, err = Finds[T](ctx, q)
        }
        offset += perLimit
        if (err != nil && err != sql.ErrNoRows) || len(rr) < 1 {
@@ -102,17 +94,9 @@ func Chunk[T Model, R any](ctx context.Context, perLimit int, fn func(rows T) (R
        if 1 == i {
            rr, total, err = SimplePagination[T](ctx, q.where, q.fields, q.group, i, perLimit, q.order, q.join, q.having, q.in...)
        } else {
-            rr, err = Finds[T](ctx, Conditions(
-                Where(q.where),
-                Fields(q.fields),
-                Group(q.group),
-                Having(q.having),
-                Join(q.join),
-                Order(q.order),
-                Offset(offset),
-                Limit(perLimit),
-                In(q.in...),
-            ))
+            q.offset = offset
+            q.limit = perLimit
+            rr, err = Finds[T](ctx, q)
        }
        offset += perLimit
        if (err != nil && err != sql.ErrNoRows) || len(rr) < 1 {
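In both ChunkFind and Chunk the paging loop no longer rebuilds a Conditions(...) option list on every round; it reuses the incoming *QueryCondition, only advancing q.offset and q.limit before handing it straight to Finds. A minimal sketch of that loop shape under assumed types; queryCondition and finds below are simplified placeholders, not the repository's real QueryCondition or Finds:

package main

import "fmt"

// queryCondition is a cut-down stand-in for the repository's QueryCondition.
type queryCondition struct {
    where  string
    offset int
    limit  int
}

// finds is a placeholder for Finds[T]: it returns one page of fake rows and
// an empty slice once the offset walks past the data set.
func finds(q *queryCondition) ([]int, error) {
    const total = 10
    var rows []int
    for i := q.offset; i < total && i < q.offset+q.limit; i++ {
        rows = append(rows, i)
    }
    return rows, nil
}

func main() {
    q := &queryCondition{where: "post_status = 'publish'"}
    perLimit, offset := 3, 0
    for {
        // Reuse the same condition object, only moving the window forward,
        // which is the shape the refactored ChunkFind/Chunk loop now uses.
        q.offset = offset
        q.limit = perLimit
        rr, err := finds(q)
        if err != nil || len(rr) < 1 {
            break
        }
        fmt.Println(rr) // [0 1 2], [3 4 5], [6 7 8], [9]
        offset += perLimit
    }
}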