Merge branch 'mapsafe'

# Conflicts:
#	cache/map.go
#	route/route.go
xing 2022-10-13 17:13:45 +08:00
commit f18bbcd1e6
7 changed files with 143 additions and 98 deletions

cache/map.go (vendored)

@@ -4,13 +4,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github/fthvgb1/wp-go/safeMap"
 	"sync"
+	"sync/atomic"
 	"time"
 )

 type MapCache[K comparable, V any] struct {
-	data         safeMap.Map[K, mapCacheStruct[V]]
+	data         atomic.Value
 	mutex        *sync.Mutex
 	cacheFunc    func(...any) (V, error)
 	batchCacheFn func(...any) (map[K]V, error)
@@ -18,7 +18,9 @@ type MapCache[K comparable, V any] struct {
 }

 func NewMapCache[K comparable, V any](expireTime time.Duration) *MapCache[K, V] {
-	return &MapCache[K, V]{expireTime: expireTime}
+	var v atomic.Value
+	v.Store(make(map[K]mapCacheStruct[V]))
+	return &MapCache[K, V]{expireTime: expireTime, data: v}
 }

 type mapCacheStruct[T any] struct {
@@ -32,7 +34,7 @@ func (m *MapCache[K, V]) SetCacheFunc(fn func(...any) (V, error)) {
 }

 func (m *MapCache[K, V]) GetSetTime(k K) (t time.Time) {
-	r, ok := m.data.Load(k)
+	r, ok := m.data.Load().(map[K]mapCacheStruct[V])[k]
 	if ok {
 		t = r.setTime
 	}
@@ -59,19 +61,23 @@ func (m *MapCache[K, V]) setCacheFn(fn func(...any) (map[K]V, error)) {
 }

 func NewMapCacheByFn[K comparable, V any](fn func(...any) (V, error), expireTime time.Duration) *MapCache[K, V] {
+	var d atomic.Value
+	d.Store(make(map[K]mapCacheStruct[V]))
 	return &MapCache[K, V]{
 		mutex:      &sync.Mutex{},
 		cacheFunc:  fn,
 		expireTime: expireTime,
-		data:       safeMap.NewMap[K, mapCacheStruct[V]](),
+		data:       d,
 	}
 }

 func NewMapCacheByBatchFn[K comparable, V any](fn func(...any) (map[K]V, error), expireTime time.Duration) *MapCache[K, V] {
+	var d atomic.Value
+	d.Store(make(map[K]mapCacheStruct[V]))
 	r := &MapCache[K, V]{
 		mutex:        &sync.Mutex{},
 		batchCacheFn: fn,
 		expireTime:   expireTime,
-		data:         safeMap.NewMap[K, mapCacheStruct[V]](),
+		data:         d,
 	}
 	r.setCacheFn(fn)
 	return r
@@ -80,16 +86,13 @@ func NewMapCacheByBatchFn[K comparable, V any](fn func(...any) (map[K]V, error),
 func (m *MapCache[K, V]) Flush() {
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
-	m.data = safeMap.NewMap[K, mapCacheStruct[V]]()
+	var d atomic.Value
+	d.Store(make(map[K]mapCacheStruct[V]))
+	m.data = d
 }

 func (m *MapCache[K, V]) Get(k K) V {
-	r, ok := m.data.Load(k)
-	if ok {
-		return r.data
-	}
-	var rr V
-	return rr
+	return m.data.Load().(map[K]mapCacheStruct[V])[k].data
 }

 func (m *MapCache[K, V]) Set(k K, v V) {
@@ -111,24 +114,26 @@ func (m *MapCache[K, V]) SetByBatchFn(params ...any) error {
 }

 func (m *MapCache[K, V]) set(k K, v V) {
-	data, ok := m.data.Load(k)
+	d, ok := m.data.Load().(map[K]mapCacheStruct[V])
 	t := time.Now()
+	data := d[k]
 	if !ok {
 		data.data = v
 		data.setTime = t
 		data.incr++
-		m.data.Store(k, data)
 	} else {
-		m.data.Store(k, mapCacheStruct[V]{
+		data = mapCacheStruct[V]{
 			data:    v,
 			setTime: t,
-		})
+		}
 	}
+	d[k] = data
+	m.data.Store(d)
 }

 func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duration, params ...any) (V, error) {
-	data, ok := m.data.Load(key)
+	d := m.data.Load().(map[K]mapCacheStruct[V])
+	data, ok := d[key]
 	if !ok {
 		data = mapCacheStruct[V]{}
 	}
@@ -139,11 +144,12 @@ func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duratio
 	if !ok || (ok && m.expireTime >= 0 && expired) {
 		t := data.incr
 		call := func() {
-			m.mutex.Lock()
-			defer m.mutex.Unlock()
-			if data.incr > t {
+			tmp, o := m.data.Load().(map[K]mapCacheStruct[V])[key]
+			if o && tmp.incr > t {
 				return
 			}
+			m.mutex.Lock()
+			defer m.mutex.Unlock()
 			r, er := m.cacheFunc(params...)
 			if err != nil {
 				err = er
@@ -151,8 +157,9 @@ func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duratio
 			}
 			data.setTime = time.Now()
 			data.data = r
-			m.data.Store(key, data)
 			data.incr++
+			d[key] = data
+			m.data.Store(d)
 		}
 		if timeout > 0 {
 			ctx, cancel := context.WithTimeout(c, timeout)
@@ -180,8 +187,9 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 	var res []V
 	t := 0
 	now := time.Duration(time.Now().UnixNano())
+	data := m.data.Load().(map[K]mapCacheStruct[V])
 	for _, k := range key {
-		d, ok := m.data.Load(k)
+		d, ok := data[k]
 		if !ok {
 			needFlush = append(needFlush, k)
 			continue
@@ -196,17 +204,17 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 	//todo: should check here whether the fetched value is the zero value, but how?
 	if len(needFlush) > 0 {
 		call := func() {
-			m.mutex.Lock()
-			defer m.mutex.Unlock()
 			tt := 0
 			for _, dd := range needFlush {
-				if ddd, ok := m.data.Load(dd); ok {
+				if ddd, ok := data[dd]; ok {
 					tt = tt + ddd.incr
 				}
 			}
 			if tt > t {
 				return
 			}
+			m.mutex.Lock()
+			defer m.mutex.Unlock()
 			r, er := m.batchCacheFn(params...)
 			if err != nil {
 				err = er
@@ -234,11 +242,9 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 		}
 	}
 	for _, k := range key {
-		d, ok := m.data.Load(k)
-		if ok {
-			res = append(res, d.data)
-		}
+		d := data[k]
+		res = append(res, d.data)
 	}
 	return res, err
 }
@@ -246,10 +252,11 @@ func (m *MapCache[K, V]) ClearExpired() {
 	now := time.Duration(time.Now().UnixNano())
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
-	m.data.Range(func(k K, v mapCacheStruct[V]) bool {
+	data := m.data.Load().(map[K]mapCacheStruct[V])
+	for k, v := range data {
 		if now > time.Duration(v.setTime.UnixNano())+m.expireTime {
-			m.data.Delete(k)
+			delete(data, k)
 		}
-		return true
-	})
+	}
+	m.data.Store(data)
 }
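
For orientation, the shape of the change in this file: every read now takes a plain map snapshot out of an atomic.Value (so Get and friends need no lock, and a missing key simply yields V's zero value), while writers still serialize on the existing mutex before calling Store. Below is a minimal self-contained sketch of that pattern with hypothetical names; it is not the repo's code, and it copies the map before mutating, which is a stricter variant than what this commit does.

// sketch.go: hypothetical illustration of the atomic.Value snapshot pattern.
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type snapshotMap[K comparable, V any] struct {
	mu   sync.Mutex   // serializes writers
	data atomic.Value // always holds a map[K]V
}

func newSnapshotMap[K comparable, V any]() *snapshotMap[K, V] {
	s := &snapshotMap[K, V]{}
	s.data.Store(make(map[K]V))
	return s
}

// Get is lock-free: it only reads whatever snapshot is currently stored.
func (s *snapshotMap[K, V]) Get(k K) V {
	return s.data.Load().(map[K]V)[k] // zero value of V if k is absent
}

// Set copies the current snapshot, mutates the copy, then publishes it.
func (s *snapshotMap[K, V]) Set(k K, v V) {
	s.mu.Lock()
	defer s.mu.Unlock()
	old := s.data.Load().(map[K]V)
	next := make(map[K]V, len(old)+1)
	for key, val := range old {
		next[key] = val
	}
	next[k] = v
	s.data.Store(next)
}

func main() {
	s := newSnapshotMap[string, int]()
	s.Set("a", 1)
	fmt.Println(s.Get("a")) // 1
	fmt.Println(s.Get("b")) // 0 (zero value for a missing key)
}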


@@ -5,11 +5,17 @@ import (
 	"fmt"
 	"github.com/dlclark/regexp2"
 	"io"
+	"math/rand"
 	"reflect"
 	"regexp"
 	"strings"
 )

+type IntNumber interface {
+	~int | ~int64 | ~int32 | ~int8 | ~int16 |
+		~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64
+}
+
 func ToAny[T any](v T) any {
 	return v
 }
@@ -33,7 +39,7 @@ func StructColumn[T any, M any](arr []M, field string) (r []T) {
 	return
 }

-func RangeSlice[T ~int | ~uint | ~int64 | ~int8 | ~int16 | ~int32 | ~uint64](start, end, step T) []T {
+func RangeSlice[T IntNumber](start, end, step T) []T {
 	if step == 0 {
 		panic("step can't be 0")
 	}
@@ -250,3 +256,8 @@ func SliceToMap[K comparable, V, T any](arr []V, fn func(V) (K, T), isCoverPrev
 	}
 	return m
 }
+
+func RandNum[T IntNumber](start, end T) T {
+	end++
+	return T(rand.Int63n(int64(end-start))) + start
+}
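
A quick usage note on the new helper: the end++ makes the upper bound inclusive, so both ends of the range can be returned. A hypothetical call site, assuming this package is imported under the name helper:

	// Hypothetical example; bounds are inclusive because RandNum increments end.
	n := helper.RandNum(1, 3) // n is 1, 2 or 3

Because IntNumber matches any type whose underlying type is an integer, time.Duration values also satisfy it, which is what the commented-out sleepTime call in the middleware below relies on.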


@@ -578,3 +578,32 @@ func TestSimpleSliceToMap(t *testing.T) {
 		})
 	}
 }
+
+func TestRandNum(t *testing.T) {
+	type args struct {
+		start int
+		end   int
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "t1",
+			args: args{
+				start: 1,
+				end:   2,
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			for i := 0; i < 100; i++ {
+				got := RandNum(tt.args.start, tt.args.end)
+				if got > tt.args.end || got < tt.args.start {
+					t.Errorf("RandNum() = %v, range error", got)
+				}
+			}
+		})
+	}
+}


@@ -8,10 +8,12 @@ import (
 	"github/fthvgb1/wp-go/plugins"
 	"github/fthvgb1/wp-go/route"
 	"github/fthvgb1/wp-go/vars"
+	"math/rand"
 	"time"
 )

 func init() {
+	rand.Seed(time.Now().UnixNano())
 	err := vars.InitConfig()
 	if err != nil {
 		panic(err)


@@ -2,32 +2,14 @@ package middleware

 import (
 	"github.com/gin-gonic/gin"
-	"math/rand"
 	"net/http"
 	"strings"
-	"sync"
 	"sync/atomic"
 	"time"
 )

-type IpLimitMap struct {
-	mux               *sync.Mutex
-	m                 map[string]*int64
-	singleIpSearchNum int64
-}
-
-func FlowLimit(maxRequestSleepNum, maxRequestNum, singleIpSearchNum int64, sleepTime []time.Duration) func(ctx *gin.Context) {
+func FlowLimit(maxRequestSleepNum, maxRequestNum int64, sleepTime []time.Duration) func(ctx *gin.Context) {
 	var flow int64
-	rand.Seed(time.Now().UnixNano())
-	randFn := func(start, end time.Duration) time.Duration {
-		end++
-		return time.Duration(rand.Intn(int(end-start)) + int(start))
-	}
-	m := IpLimitMap{
-		mux:               &sync.Mutex{},
-		m:                 make(map[string]*int64),
-		singleIpSearchNum: singleIpSearchNum,
-	}
 	statPath := map[string]struct{}{
 		"wp-includes": {},
 		"wp-content":  {},
@@ -40,58 +22,19 @@ func FlowLimit(maxRequestSleepNum, maxRequestNum, singleIpSearchNum int64, sleep
 			c.Next()
 			return
 		}
-		s := false
-		ip := c.ClientIP()
-		defer m.searchLimit(false, c, ip, f, &s)
-		if m.searchLimit(true, c, ip, f, &s) {
-			c.Abort()
-			return
-		}
 		atomic.AddInt64(&flow, 1)
 		defer func() {
 			atomic.AddInt64(&flow, -1)
 		}()
 		if flow >= maxRequestSleepNum && flow <= maxRequestNum {
-			t := randFn(sleepTime[0], sleepTime[1])
-			time.Sleep(t)
+			//t := helper.RandNum(sleepTime[0], sleepTime[1])
+			//time.Sleep(t)
 		} else if flow > maxRequestNum {
 			c.String(http.StatusForbidden, "请求太多了,服务器君表示压力山大==!, 请稍后访问")
 			c.Abort()
 			return
 		}
 		c.Next()
 	}
 }
-
-func (m *IpLimitMap) searchLimit(start bool, c *gin.Context, ip string, f []string, s *bool) (isForbid bool) {
-	if f[0] == "" && c.Query("s") != "" {
-		if start {
-			i, ok := m.m[ip]
-			if !ok {
-				m.mux.Lock()
-				i = new(int64)
-				m.m[ip] = i
-				m.mux.Unlock()
-			}
-			if m.singleIpSearchNum > 0 && *i >= m.singleIpSearchNum {
-				isForbid = true
-				return
-			}
-			*s = true
-			atomic.AddInt64(i, 1)
-			return
-		}
-		i, ok := m.m[ip]
-		if ok && *s && *i > 0 {
-			atomic.AddInt64(i, -1)
-			if *i == 0 {
-				m.mux.Lock()
-				delete(m.m, ip)
-				m.mux.Unlock()
-			}
-		}
-	}
-	return
-}
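
FlowLimit drops the singleIpSearchNum parameter and the per-IP bookkeeping; that concern moves into the new middleware/iplimit.go below. The sleep branch is left commented out here; read together with the new helper, it would presumably look like the following fragment when re-enabled (a sketch, not part of the commit, and it assumes the helper package is imported):

		// Hypothetical re-enabled form of the commented-out lines above.
		if flow >= maxRequestSleepNum && flow <= maxRequestNum {
			t := helper.RandNum(sleepTime[0], sleepTime[1]) // random delay within the window
			time.Sleep(t)
		}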

middleware/iplimit.go (new file)

@@ -0,0 +1,53 @@
+package middleware
+
+import (
+	"github.com/gin-gonic/gin"
+	"net/http"
+	"sync"
+	"sync/atomic"
+)
+
+type IpLimitMap struct {
+	mux      *sync.Mutex
+	m        map[string]*int64
+	limitNum int64
+}
+
+func IpLimit(num int64) func(ctx *gin.Context) {
+	m := IpLimitMap{
+		mux:      &sync.Mutex{},
+		m:        make(map[string]*int64),
+		limitNum: num,
+	}
+	return func(c *gin.Context) {
+		ip := c.ClientIP()
+		s := false
+		defer func() {
+			i, ok := m.m[ip]
+			if ok && s && *i > 0 {
+				//time.Sleep(time.Second * 3)
+				atomic.AddInt64(i, -1)
+				if *i == 0 {
+					m.mux.Lock()
+					delete(m.m, ip)
+					m.mux.Unlock()
+				}
+			}
+		}()
+		i, ok := m.m[ip]
+		if !ok {
+			m.mux.Lock()
+			i = new(int64)
+			m.m[ip] = i
+			m.mux.Unlock()
+		}
+		if m.limitNum > 0 && *i >= m.limitNum {
+			c.Status(http.StatusForbidden)
+			c.Abort()
+			return
+		}
+		s = true
+		atomic.AddInt64(i, 1)
+	}
+}
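
How these middlewares are mounted is not part of this view (route/route.go is listed as a conflict but its diff is not shown), so the wiring below is only a hypothetical sketch with made-up limits:

	// Hypothetical wiring; the actual registration lives in route/route.go.
	r := gin.New()
	r.Use(middleware.FlowLimit(5, 20, []time.Duration{time.Second, 2 * time.Second}))
	r.Use(middleware.IpLimit(10)) // per-client-IP request limit of 10, rejected with 403 above it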


@@ -229,7 +229,7 @@ func SimplePagination[T Model](where ParseWhere, fields, group string, page, pag
 	return
 }

-func FindOneById[T Model, I ~int | ~uint64 | ~int64 | ~int32](id I) (T, error) {
+func FindOneById[T Model, I helper.IntNumber](id I) (T, error) {
 	var r T
 	sql := fmt.Sprintf("select * from `%s` where `%s`=?", r.Table(), r.PrimaryKey())
 	err := db.Db.Get(&r, sql, id)
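
With the constraint switched to helper.IntNumber, the id argument can be any integer-kind type (int, uint64, a typed alias, and so on). A hypothetical call, where post stands in for some concrete type that implements Model:

	// Hypothetical: post is a placeholder model type, 42 a placeholder primary key.
	p, err := FindOneById[post](uint64(42))
	if err != nil {
		// handle the lookup error
	}
	_ = p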