mirror of https://github.com/chrislusf/seaweedfs

Fix incorrect global counter key & add a read lock when reading the counter map to avoid unsafe concurrent map access

石昌林 2022-06-20 19:16:53 +08:00
parent bd63b305dd
commit be5c901bea
2 changed files with 33 additions and 22 deletions
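
The read-lock half of the change is the standard double-checked pattern for a shared map: look the counter up under RLock, and only take the write lock when the counter has to be created. A minimal, self-contained sketch of that pattern, using a hypothetical limiter type as a stand-in for the CircuitBreaker in the diff below (not the project's actual code):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// limiter is a trimmed-down stand-in for the CircuitBreaker: a counter map
// shared by many request goroutines and guarded by an RWMutex.
type limiter struct {
	sync.RWMutex
	counters map[string]*int64
}

// counterFor mirrors the pattern the commit introduces: read the map under
// the read lock, and only take the write lock when the counter is missing.
func (l *limiter) counterFor(key string) *int64 {
	l.RLock()
	c, ok := l.counters[key]
	l.RUnlock()
	if ok {
		return c
	}
	l.Lock()
	defer l.Unlock()
	// Re-check under the write lock: another goroutine may have created the
	// counter between the RUnlock above and the Lock here.
	if c, ok = l.counters[key]; ok {
		return c
	}
	c = new(int64)
	l.counters[key] = c
	return c
}

func main() {
	l := &limiter{counters: map[string]*int64{}}
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt64(l.counterFor("Write:Count"), 1)
		}()
	}
	wg.Wait()
	fmt.Println(*l.counterFor("Write:Count")) // prints 10
}

Without the read lock, lookups in cb.counters can race with the writes performed under cb.Lock(), which is the unsafe concurrent map access the commit message refers to.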

View file

@@ -16,7 +16,7 @@ import (
 )
 
 type CircuitBreaker struct {
-	sync.Mutex
+	sync.RWMutex
 	Enabled     bool
 	counters    map[string]*int64
 	limitations map[string]int64
@@ -110,7 +110,7 @@ func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request),
 func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {
 	//bucket simultaneous request count
-	bucketCountRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest)
+	bucketCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest)
 	if bucketCountRollBack != nil {
 		rollback = append(rollback, bucketCountRollBack)
 	}
@@ -119,7 +119,7 @@ func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (
 	}
 	//bucket simultaneous request content bytes
-	bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
+	bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed)
 	if bucketContentLengthRollBack != nil {
 		rollback = append(rollback, bucketContentLengthRollBack)
 	}
@@ -128,7 +128,7 @@ func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (
 	}
 	//global simultaneous request count
-	globalCountRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeCount, 1, s3err.ErrTooManyRequest)
+	globalCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest)
 	if globalCountRollBack != nil {
 		rollback = append(rollback, globalCountRollBack)
 	}
@@ -137,7 +137,7 @@ func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (
 	}
 	//global simultaneous request content bytes
-	globalContentLengthRollBack, errCode := cb.loadCounterAndCompare("", action, s3_constants.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
+	globalContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed)
 	if globalContentLengthRollBack != nil {
 		rollback = append(rollback, globalContentLengthRollBack)
 	}
@@ -147,11 +147,13 @@ func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (
 	return
 }
 
-func (cb *CircuitBreaker) loadCounterAndCompare(bucket, action, limitType string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {
-	key := s3_constants.Concat(bucket, action, limitType)
+func (cb *CircuitBreaker) loadCounterAndCompare(key string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {
 	e = s3err.ErrNone
 	if max, ok := cb.limitations[key]; ok {
+		cb.RLock()
 		counter, exists := cb.counters[key]
+		cb.RUnlock()
 		if !exists {
 			cb.Lock()
 			counter, exists = cb.counters[key]
@@ -171,7 +173,6 @@ func (cb *CircuitBreaker) loadCounterAndCompare(bucket, action, limitType string
 		f = func() {
 			atomic.AddInt64(counter, -inc)
 		}
-		current = atomic.LoadInt64(counter)
 		if current > max {
 			e = errCode
 			return
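
The key half of the fix shows up in the two global-limit hunks above: the new code looks a global limit up under s3_constants.Concat(action, limitType), with no bucket component, while the old code had loadCounterAndCompare rebuild the key as Concat("", action, limitType) with an empty bucket string. Those are different keys, so the configured global limitation was never matched. A standalone sketch of the mismatch, with a hypothetical concat helper standing in for s3_constants.Concat (the real separator may differ, but any separator shows the same problem):

package main

import (
	"fmt"
	"strings"
)

// concat is a hypothetical stand-in for s3_constants.Concat.
func concat(parts ...string) string {
	return strings.Join(parts, ":")
}

func main() {
	action, limitType := "Write", "Count"

	// Global limits carry no bucket component in their key.
	limitKey := concat(action, limitType) // "Write:Count"

	// Before the fix, the counter lookup built its key with an empty bucket,
	// which never matches the registered limitation key above.
	oldCounterKey := concat("", action, limitType) // ":Write:Count"
	newCounterKey := concat(action, limitType)     // "Write:Count"

	fmt.Println(oldCounterKey == limitKey) // false, so the global limit was skipped
	fmt.Println(newCounterKey == limitKey) // true
}

Building the key at the call site also means loadCounterAndCompare no longer needs to know whether it is checking a bucket limit or a global one; it just receives the final key.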

View file

@@ -11,28 +11,38 @@ import (
 )
 
 type TestLimitCase struct {
 	actionName string
 	limitType  string
 	bucketLimitValue int64
 	globalLimitValue int64
 	routineCount int
-	reqBytes     int64
 	successCount int64
 }
 
 var (
 	bucket = "/test"
-	action = s3_constants.ACTION_READ
+	action         = s3_constants.ACTION_WRITE
+	fileSize int64 = 200
 	TestLimitCases = []*TestLimitCase{
-		{action, s3_constants.LimitTypeCount, 5, 5, 6, 1024, 5},
-		{action, s3_constants.LimitTypeCount, 6, 6, 6, 1024, 6},
-		{action, s3_constants.LimitTypeCount, 5, 6, 6, 1024, 5},
-		{action, s3_constants.LimitTypeBytes, 1024, 1024, 6, 200, 5},
-		{action, s3_constants.LimitTypeBytes, 1200, 1200, 6, 200, 6},
-		{action, s3_constants.LimitTypeBytes, 11990, 11990, 60, 200, 59},
-		{action, s3_constants.LimitTypeBytes, 11790, 11990, 70, 200, 58},
+		//bucket-LimitTypeCount
+		{action, s3_constants.LimitTypeCount, 5, 6, 60, 5},
+		{action, s3_constants.LimitTypeCount, 0, 6, 6, 0},
+
+		//global-LimitTypeCount
+		{action, s3_constants.LimitTypeCount, 6, 5, 6, 5},
+		{action, s3_constants.LimitTypeCount, 6, 0, 6, 0},
+
+		//bucket-LimitTypeBytes
+		{action, s3_constants.LimitTypeBytes, 1000, 1020, 6, 5},
+		{action, s3_constants.LimitTypeBytes, 0, 1020, 6, 0},
+
+		//global-LimitTypeBytes
+		{action, s3_constants.LimitTypeBytes, 1020, 1000, 6, 5},
+		{action, s3_constants.LimitTypeBytes, 1020, 0, 6, 0},
 	}
 )
@@ -64,14 +74,14 @@ func TestLimit(t *testing.T) {
 			t.Fatal(err)
 		}
-		successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: tc.reqBytes})
+		successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: fileSize}, tc.actionName)
 		if successCount != tc.successCount {
-			t.Errorf("successCount not equal, expect=%d, actual=%d", tc.successCount, successCount)
+			t.Errorf("successCount not equal, expect=%d, actual=%d, case: %v", tc.successCount, successCount, tc)
 		}
 	}
 }
 
-func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request) int64 {
+func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request, action string) int64 {
 	var successCounter int64
 	resultCh := make(chan []func(), routineCount)
 	var wg sync.WaitGroup
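
With reqBytes removed from the struct, each new test case reads as (actionName, limitType, bucketLimitValue, globalLimitValue, routineCount, successCount), and every simulated request now carries fileSize (200) bytes. For the bytes cases the expected successCount is simply how many 200-byte requests fit under the tighter of the two limits. A rough standalone illustration of that arithmetic (sequential rather than concurrent, and not the project's test code):

package main

import "fmt"

// expectedSuccesses models the counter-and-compare check sequentially: each
// request adds fileSize to a running total and is rejected, and rolled back,
// once the total exceeds the limit.
func expectedSuccesses(limit, fileSize int64, requests int) int64 {
	var current, ok int64
	for i := 0; i < requests; i++ {
		current += fileSize
		if current > limit {
			current -= fileSize // a rejected request rolls its increment back
			continue
		}
		ok++
	}
	return ok
}

func main() {
	// Case {action, LimitTypeBytes, 1000, 1020, 6, 5}: the bucket limit of
	// 1000 bytes is the tighter bound and each request is 200 bytes, so 5 of
	// the 6 requests are expected to get through.
	fmt.Println(expectedSuccesses(1000, 200, 6)) // prints 5
}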