From edc0e75d3add35be0d2c448c956292e6ecdfdeb2 Mon Sep 17 00:00:00 2001
From: xing
Date: Thu, 13 Oct 2022 16:26:31 +0800
Subject: [PATCH] rework map: switch MapCache to a generic concurrency-safe map
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 cache/map.go       |  48 +++---
 route/route.go     |   2 +-
 safeMap/safemap.go | 394 +++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 424 insertions(+), 20 deletions(-)
 create mode 100644 safeMap/safemap.go

diff --git a/cache/map.go b/cache/map.go
index 2e43191..25f87f4 100644
--- a/cache/map.go
+++ b/cache/map.go
@@ -4,12 +4,13 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"github/fthvgb1/wp-go/safeMap"
 	"sync"
 	"time"
 )
 
 type MapCache[K comparable, V any] struct {
-	data         map[K]mapCacheStruct[V]
+	data         safeMap.Map[K, mapCacheStruct[V]]
 	mutex        *sync.Mutex
 	cacheFunc    func(...any) (V, error)
 	batchCacheFn func(...any) (map[K]V, error)
@@ -31,7 +32,7 @@ func (m *MapCache[K, V]) SetCacheFunc(fn func(...any) (V, error)) {
 }
 
 func (m *MapCache[K, V]) GetSetTime(k K) (t time.Time) {
-	r, ok := m.data[k]
+	r, ok := m.data.Load(k)
 	if ok {
 		t = r.setTime
 	}
@@ -62,7 +63,7 @@ func NewMapCacheByFn[K comparable, V any](fn func(...any) (V, error), expireTime
 		mutex:      &sync.Mutex{},
 		cacheFunc:  fn,
 		expireTime: expireTime,
-		data:       make(map[K]mapCacheStruct[V]),
+		data:       safeMap.NewMap[K, mapCacheStruct[V]](),
 	}
 }
 func NewMapCacheByBatchFn[K comparable, V any](fn func(...any) (map[K]V, error), expireTime time.Duration) *MapCache[K, V] {
@@ -70,7 +71,7 @@ func NewMapCacheByBatchFn[K comparable, V any](fn func(...any) (map[K]V, error),
 		mutex:        &sync.Mutex{},
 		batchCacheFn: fn,
 		expireTime:   expireTime,
-		data:         make(map[K]mapCacheStruct[V]),
+		data:         safeMap.NewMap[K, mapCacheStruct[V]](),
 	}
 	r.setCacheFn(fn)
 	return r
@@ -79,11 +80,16 @@ func NewMapCacheByBatchFn[K comparable, V any](fn func(...any) (map[K]V, error),
 func (m *MapCache[K, V]) Flush() {
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
-	m.data = make(map[K]mapCacheStruct[V])
+	m.data = safeMap.NewMap[K, mapCacheStruct[V]]()
 }
 
 func (m *MapCache[K, V]) Get(k K) V {
-	return m.data[k].data
+	r, ok := m.data.Load(k)
+	if ok {
+		return r.data
+	}
+	var rr V
+	return rr
 }
 
 func (m *MapCache[K, V]) Set(k K, v V) {
@@ -107,23 +113,24 @@ func (m *MapCache[K, V]) SetByBatchFn(params ...any) error {
 }
 
 func (m *MapCache[K, V]) set(k K, v V) {
-	data, ok := m.data[k]
+	data, ok := m.data.Load(k)
 	t := time.Now()
 	if !ok {
 		data.data = v
 		data.setTime = t
 		data.incr++
-		m.data[k] = data
+		m.data.Store(k, data)
 	} else {
-		m.data[k] = mapCacheStruct[V]{
+		m.data.Store(k, mapCacheStruct[V]{
 			data:    v,
 			setTime: t,
-		}
+		})
+
 	}
 }
 
 func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duration, params ...any) (V, error) {
-	data, ok := m.data[key]
+	data, ok := m.data.Load(key)
 	if !ok {
 		data = mapCacheStruct[V]{}
 	}
@@ -146,7 +153,7 @@ func (m *MapCache[K, V]) GetCache(c context.Context, key K, timeout time.Duratio
 		}
 		data.setTime = time.Now()
 		data.data = r
-		m.data[key] = data
+		m.data.Store(key, data)
 		data.incr++
 	}
 	if timeout > 0 {
@@ -176,7 +183,7 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 	t := 0
 	now := time.Duration(time.Now().UnixNano())
 	for _, k := range key {
-		d, ok := m.data[k]
+		d, ok := m.data.Load(k)
 		if !ok {
 			needFlush = append(needFlush, k)
 			continue
@@ -195,7 +202,7 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 	defer m.mutex.Unlock()
 	tt := 0
 	for _, dd := range needFlush {
-		if ddd, ok := m.data[dd]; ok {
+		if ddd, ok := m.data.Load(dd); ok {
 			tt = tt + ddd.incr
 		}
 	}
@@ -229,8 +236,10 @@ func (m *MapCache[K, V]) GetCacheBatch(c context.Context, key []K, timeout time.
 		}
 	}
 	for _, k := range key {
-		d := m.data[k]
-		res = append(res, d.data)
+		d, ok := m.data.Load(k)
+		if ok {
+			res = append(res, d.data)
+		}
 	}
 	return res, err
 }
@@ -239,9 +248,10 @@ func (m *MapCache[K, V]) ClearExpired() {
 	now := time.Duration(time.Now().UnixNano())
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
-	for k, v := range m.data {
+	m.data.Range(func(k K, v mapCacheStruct[V]) bool {
 		if now > time.Duration(v.setTime.UnixNano())+m.expireTime {
-			delete(m.data, k)
+			m.data.Delete(k)
 		}
-	}
+		return true
+	})
 }
diff --git a/route/route.go b/route/route.go
index 09cf0ef..34efae7 100644
--- a/route/route.go
+++ b/route/route.go
@@ -39,7 +39,7 @@ func SetupRouter() *gin.Engine {
 	r.Use(
 		middleware.ValidateServerNames(),
 		gin.Logger(),
-		middleware.FlowLimit(vars.Conf.MaxRequestSleepNum, vars.Conf.MaxRequestNum, vars.Conf.SingleIpSearchNum, vars.Conf.SleepTime),
+		//middleware.FlowLimit(vars.Conf.MaxRequestSleepNum, vars.Conf.MaxRequestNum, vars.Conf.SingleIpSearchNum, vars.Conf.SleepTime),
 		gin.Recovery(),
 		middleware.SetStaticFileCache,
 	)
diff --git a/safeMap/safemap.go b/safeMap/safemap.go
new file mode 100644
index 0000000..253d317
--- /dev/null
+++ b/safeMap/safemap.go
@@ -0,0 +1,394 @@
+package safeMap
+
+import (
+	"sync"
+	"sync/atomic"
+	"unsafe"
+)
+
+// Map is like a Go map[K]V but is safe for concurrent use
+// by multiple goroutines without additional locking or coordination.
+// Loads, stores, and deletes run in amortized constant time.
+//
+// The Map type is specialized. Most code should use a plain Go map instead,
+// with separate locking or coordination, for better type safety and to make it
+// easier to maintain other invariants along with the map content.
+//
+// The Map type is optimized for two common use cases: (1) when the entry for a given
+// key is only ever written once but read many times, as in caches that only grow,
+// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
+// sets of keys. In these two cases, use of a Map may significantly reduce lock
+// contention compared to a Go map paired with a separate Mutex or RWMutex.
+//
+// Use NewMap to create a Map: the zero Map is not ready for use. A Map must not be copied after first use.
+type Map[K comparable, V any] struct {
+	mu sync.Mutex
+
+	// read contains the portion of the map's contents that are safe for
+	// concurrent access (with or without mu held).
+	//
+	// The read field itself is always safe to load, but must only be stored with
+	// mu held.
+	//
+	// Entries stored in read may be updated concurrently without mu, but updating
+	// a previously-expunged entry requires that the entry be copied to the dirty
+	// map and unexpunged with mu held.
+	read atomic.Value // readOnly
+
+	// dirty contains the portion of the map's contents that require mu to be
+	// held. To ensure that the dirty map can be promoted to the read map quickly,
+	// it also includes all the non-expunged entries in the read map.
+	//
+	// Expunged entries are not stored in the dirty map. An expunged entry in the
+	// clean map must be unexpunged and added to the dirty map before a new value
+	// can be stored to it.
+	//
+	// If the dirty map is nil, the next write to the map will initialize it by
+	// making a shallow copy of the clean map, omitting stale entries.
+	dirty map[K]*entry[V]
+
+	// misses counts the number of loads since the read map was last updated that
+	// needed to lock mu to determine whether the key was present.
+	//
+	// Once enough misses have occurred to cover the cost of copying the dirty
+	// map, the dirty map will be promoted to the read map (in the unamended
+	// state) and the next store to the map will make a new dirty copy.
+	misses int
+
+	expunged unsafe.Pointer // sentinel marking deleted-and-removed entries; set by NewMap
+}
+
+func NewMap[K comparable, V any]() Map[K, V] {
+	var r V
+	return Map[K, V]{
+		expunged: unsafe.Pointer(&r),
+	}
+}
+
+// readOnly is an immutable struct stored atomically in the Map.read field.
+type readOnly[K comparable, V any] struct {
+	m       map[K]*entry[V]
+	amended bool // true if the dirty map contains some key not in m.
+}
+
+// An entry is a slot in the map corresponding to a particular key.
+type entry[V any] struct {
+	// p points to the value of type V stored for the entry.
+	//
+	// If p == nil, the entry has been deleted, and either m.dirty == nil or
+	// m.dirty[key] is e.
+	//
+	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
+	// is missing from m.dirty.
+	//
+	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
+	// != nil, in m.dirty[key].
+	//
+	// An entry can be deleted by atomic replacement with nil: when m.dirty is
+	// next created, it will atomically replace nil with expunged and leave
+	// m.dirty[key] unset.
+	//
+	// An entry's associated value can be updated by atomic replacement, provided
+	// p != expunged. If p == expunged, an entry's associated value can be updated
+	// only after first setting m.dirty[key] = e so that lookups using the dirty
+	// map find the entry.
+	p unsafe.Pointer // *V
+}
+
+func newEntry[V any](i V) *entry[V] {
+	return &entry[V]{p: unsafe.Pointer(&i)}
+}
+
+// Load returns the value stored in the map for a key, or the zero value of
+// V if no value is present.
+// The ok result indicates whether value was found in the map.
+func (m *Map[K, V]) Load(key K) (value V, ok bool) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	e, ok := read.m[key]
+	if !ok && read.amended {
+		m.mu.Lock()
+		// Avoid reporting a spurious miss if m.dirty got promoted while we were
+		// blocked on m.mu. (If further loads of the same key will not miss, it's
+		// not worth copying the dirty map for this key.)
+		read, _ = m.read.Load().(readOnly[K, V])
+		e, ok = read.m[key]
+		if !ok && read.amended {
+			e, ok = m.dirty[key]
+			// Regardless of whether the entry was present, record a miss: this key
+			// will take the slow path until the dirty map is promoted to the read
+			// map.
+			m.missLocked()
+		}
+		m.mu.Unlock()
+	}
+	if !ok {
+		var r V
+		return r, false
+	}
+	return e.load(m.expunged)
+}
+
+func (e *entry[V]) load(px unsafe.Pointer) (value V, ok bool) {
+	p := atomic.LoadPointer(&e.p)
+	if p == nil || p == px {
+		var r V
+		return r, false
+	}
+	return *(*V)(p), true
+}
+
+// Store sets the value for a key.
+func (m *Map[K, V]) Store(key K, value V) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok && e.tryStore(&value, m.expunged) {
+		return
+	}
+
+	m.mu.Lock()
+	read, _ = m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		if e.unexpungeLocked(m.expunged) {
+			// The entry was previously expunged, which implies that there is a
+			// non-nil dirty map and this entry is not in it.
+			m.dirty[key] = e
+		}
+		e.storeLocked(&value)
+	} else if e, ok := m.dirty[key]; ok {
+		e.storeLocked(&value)
+	} else {
+		if !read.amended {
+			// We're adding the first new key to the dirty map.
+			// Make sure it is allocated and mark the read-only map as incomplete.
+			m.dirtyLocked()
+			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
+		}
+		m.dirty[key] = newEntry(value)
+	}
+	m.mu.Unlock()
+}
+
+// tryStore stores a value if the entry has not been expunged.
+//
+// If the entry is expunged, tryStore returns false and leaves the entry
+// unchanged.
+func (e *entry[V]) tryStore(i *V, px unsafe.Pointer) bool {
+	for {
+		p := atomic.LoadPointer(&e.p)
+		if p == px {
+			return false
+		}
+		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
+			return true
+		}
+	}
+}
+
+// unexpungeLocked ensures that the entry is not marked as expunged.
+//
+// If the entry was previously expunged, it must be added to the dirty map
+// before m.mu is unlocked.
+func (e *entry[V]) unexpungeLocked(px unsafe.Pointer) (wasExpunged bool) {
+	return atomic.CompareAndSwapPointer(&e.p, px, nil)
+}
+
+// storeLocked unconditionally stores a value to the entry.
+//
+// The entry must be known not to be expunged.
+func (e *entry[V]) storeLocked(i *V) {
+	atomic.StorePointer(&e.p, unsafe.Pointer(i))
+}
+
+// LoadOrStore returns the existing value for the key if present.
+// Otherwise, it stores and returns the given value.
+// The loaded result is true if the value was loaded, false if stored.
+func (m *Map[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
+	// Avoid locking if it's a clean hit.
+	read, _ := m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		actual, loaded, ok := e.tryLoadOrStore(value, m.expunged)
+		if ok {
+			return actual, loaded
+		}
+	}
+
+	m.mu.Lock()
+	read, _ = m.read.Load().(readOnly[K, V])
+	if e, ok := read.m[key]; ok {
+		if e.unexpungeLocked(m.expunged) {
+			m.dirty[key] = e
+		}
+		actual, loaded, _ = e.tryLoadOrStore(value, m.expunged)
+	} else if e, ok := m.dirty[key]; ok {
+		actual, loaded, _ = e.tryLoadOrStore(value, m.expunged)
+		m.missLocked()
+	} else {
+		if !read.amended {
+			// We're adding the first new key to the dirty map.
+			// Make sure it is allocated and mark the read-only map as incomplete.
+			m.dirtyLocked()
+			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
+		}
+		m.dirty[key] = newEntry[V](value)
+		actual, loaded = value, false
+	}
+	m.mu.Unlock()
+
+	return actual, loaded
+}
+
+// tryLoadOrStore atomically loads or stores a value if the entry is not
+// expunged.
+//
+// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
+// returns with ok==false.
+func (e *entry[V]) tryLoadOrStore(i V, px unsafe.Pointer) (actual V, loaded, ok bool) {
+	p := atomic.LoadPointer(&e.p)
+	if p == px {
+		var r V
+		return r, false, false
+	}
+	if p != nil {
+		return *(*V)(p), true, true
+	}
+
+	// Copy the value after the first load to make this method more amenable
+	// to escape analysis: if we hit the "load" path or the entry is expunged, we
+	// shouldn't bother heap-allocating.
+	ic := i
+	for {
+		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
+			return i, false, true
+		}
+		p = atomic.LoadPointer(&e.p)
+		if p == px {
+			var r V
+			return r, false, false
+		}
+		if p != nil {
+			return *(*V)(p), true, true
+		}
+	}
+}
+
+// LoadAndDelete deletes the value for a key, returning the previous value if any.
+// The loaded result reports whether the key was present.
+func (m *Map[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
+	read, _ := m.read.Load().(readOnly[K, V])
+	e, ok := read.m[key]
+	if !ok && read.amended {
+		m.mu.Lock()
+		read, _ = m.read.Load().(readOnly[K, V])
+		e, ok = read.m[key]
+		if !ok && read.amended {
+			e, ok = m.dirty[key]
+			delete(m.dirty, key)
+			// Regardless of whether the entry was present, record a miss: this key
+			// will take the slow path until the dirty map is promoted to the read
+			// map.
+			m.missLocked()
+		}
+		m.mu.Unlock()
+	}
+	if ok {
+		return e.delete(m.expunged)
+	}
+	var r V
+	return r, false
+}
+
+// Delete deletes the value for a key.
+func (m *Map[K, V]) Delete(key K) {
+	m.LoadAndDelete(key)
+}
+
+func (e *entry[V]) delete(px unsafe.Pointer) (value V, ok bool) {
+	for {
+		p := atomic.LoadPointer(&e.p)
+		if p == nil || p == px {
+			var r V
+			return r, false
+		}
+		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
+			return *(*V)(p), true
+		}
+	}
+}
+
+// Range calls f sequentially for each key and value present in the map.
+// If f returns false, range stops the iteration.
+//
+// Range does not necessarily correspond to any consistent snapshot of the Map's
+// contents: no key will be visited more than once, but if the value for any key
+// is stored or deleted concurrently (including by f), Range may reflect any
+// mapping for that key from any point during the Range call. Range does not
+// block other methods on the receiver; even f itself may call any method on m.
+//
+// Range may be O(N) with the number of elements in the map even if f returns
+// false after a constant number of calls.
+func (m *Map[K, V]) Range(f func(key K, value V) bool) {
+	// We need to be able to iterate over all the keys that were already
+	// present at the start of the call to Range.
+	// If read.amended is false, then read.m satisfies that property without
+	// requiring us to hold m.mu for a long time.
+	read, _ := m.read.Load().(readOnly[K, V])
+	if read.amended {
+		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
+		// (assuming the caller does not break out early), so a call to Range
+		// amortizes an entire copy of the map: we can promote the dirty copy
+		// immediately!
+		m.mu.Lock()
+		read, _ = m.read.Load().(readOnly[K, V])
+		if read.amended {
+			read = readOnly[K, V]{m: m.dirty}
+			m.read.Store(read)
+			m.dirty = nil
+			m.misses = 0
+		}
+		m.mu.Unlock()
+	}
+
+	for k, e := range read.m {
+		v, ok := e.load(m.expunged)
+		if !ok {
+			continue
+		}
+		if !f(k, v) {
+			break
+		}
+	}
+}
+
+func (m *Map[K, V]) missLocked() {
+	m.misses++
+	if m.misses < len(m.dirty) {
+		return
+	}
+	m.read.Store(readOnly[K, V]{m: m.dirty})
+	m.dirty = nil
+	m.misses = 0
+}
+
+func (m *Map[K, V]) dirtyLocked() {
+	if m.dirty != nil {
+		return
+	}
+
+	read, _ := m.read.Load().(readOnly[K, V])
+	m.dirty = make(map[K]*entry[V], len(read.m))
+	for k, e := range read.m {
+		if !e.tryExpungeLocked(m.expunged) {
+			m.dirty[k] = e
+		}
+	}
+}
+
+func (e *entry[V]) tryExpungeLocked(px unsafe.Pointer) (isExpunged bool) {
+	p := atomic.LoadPointer(&e.p)
+	for p == nil {
+		if atomic.CompareAndSwapPointer(&e.p, nil, px) {
+			return true
+		}
+		p = atomic.LoadPointer(&e.p)
+	}
+	return p == px
+}
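
Usage sketch (illustrative, not part of the patch): the snippet below exercises the safeMap.Map API that MapCache now relies on. Only the import path github/fthvgb1/wp-go/safeMap comes from the diff; the package main wrapper, keys, and values are hypothetical.

package main

import (
	"fmt"
	"sync"

	"github/fthvgb1/wp-go/safeMap"
)

func main() {
	// Construct with NewMap so the internal expunged sentinel is initialized;
	// unlike sync.Map, the zero value of this generic Map is not ready for use.
	m := safeMap.NewMap[string, int]()

	// Concurrent writers need no extra locking.
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			m.Store(fmt.Sprintf("key-%d", i), i)
		}(i)
	}
	wg.Wait()

	// Load reports ok == false and returns the zero value of V for a missing key.
	if v, ok := m.Load("key-2"); ok {
		fmt.Println("key-2 =", v)
	}

	// LoadOrStore keeps the existing value when the key is already present.
	actual, loaded := m.LoadOrStore("key-2", 99)
	fmt.Println(actual, loaded) // 2 true

	// Range may call Delete on the receiver, which is how MapCache.ClearExpired
	// prunes expired entries while iterating.
	m.Range(func(k string, v int) bool {
		if v%2 == 0 {
			m.Delete(k)
		}
		return true
	})
}

Compared with sync.Map, which boxes every value as interface{}, this generic port stores typed values behind unsafe pointers, so call sites avoid type assertions; the trade-off is the per-map expunged sentinel, which is why construction must go through NewMap.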