
add some unit tests and some code optimizations

石昌林 2022-06-17 17:11:18 +08:00
parent 78b3728169
commit 37df209195
7 changed files with 295 additions and 110 deletions


@@ -1,11 +1,11 @@
package s3api
import (
"github.com/chrislusf/seaweedfs/weed/config"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_config"
"github.com/chrislusf/seaweedfs/weed/util"
)
@@ -27,7 +27,7 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la
content := message.NewEntry.Content
_ = s3a.onIamConfigUpdate(dir, fileName, content)
_ = s3a.onCbConfigUpdate(dir, fileName, content)
_ = s3a.onCircuitBreakerConfigUpdate(dir, fileName, content)
return nil
}
@@ -52,8 +52,8 @@ func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte)
}
//reload circuit breaker config
func (s3a *S3ApiServer) onCbConfigUpdate(dir, filename string, content []byte) error {
if dir == config.CircuitBreakerConfigDir && filename == config.CircuitBreakerConfigFile {
func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, content []byte) error {
if dir == s3_config.CircuitBreakerConfigDir && filename == s3_config.CircuitBreakerConfigFile {
if err := s3a.cb.LoadS3ApiConfigurationFromBytes(content); err != nil {
return err
}


@@ -1,11 +1,14 @@
package config
package s3_config
import "strings"
import (
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"strings"
)
var (
CircuitBreakerConfigDir = "/etc/s3"
CircuitBreakerConfigFile = "circuit_breaker.json"
AllowedActions = []string{"Read", "Write", "List", "Tagging", "Admin"}
AllowedActions = []string{s3_constants.ACTION_READ, s3_constants.ACTION_WRITE, s3_constants.ACTION_LIST, s3_constants.ACTION_TAGGING, s3_constants.ACTION_ADMIN}
LimitTypeCount = "count"
LimitTypeBytes = "bytes"
Separator = ":"
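
For reference, these constants point at the circuit breaker configuration stored in the filer at /etc/s3/circuit_breaker.json. A minimal sketch of its shape, assembled from the expected JSON in the shell command test at the end of this commit (keys are built by Concat(action, limitType), and the protobuf int64 limits serialize as strings):

{
  "global": {
    "enabled": true,
    "actions": {
      "Read:count": "500",
      "Write:count": "200"
    }
  },
  "buckets": {
    "x": {
      "enabled": true,
      "actions": {
        "Read:count": "200",
        "Write:count": "100"
      }
    }
  }
}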


@@ -2,19 +2,21 @@ package s3api
import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/config"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_config"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"github.com/gorilla/mux"
"go.uber.org/atomic"
"net/http"
"sync"
)
type CircuitBreaker struct {
sync.Mutex
Enabled bool
counters map[string]*atomic.Int64
limitations map[string]int64
@@ -26,18 +28,18 @@ func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker {
limitations: make(map[string]int64),
}
_ = pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
content, err := filer.ReadInsideFiler(client, config.CircuitBreakerConfigDir, config.CircuitBreakerConfigFile)
if err == nil {
err = cb.LoadS3ApiConfigurationFromBytes(content)
}
err := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
content, err := filer.ReadInsideFiler(client, s3_config.CircuitBreakerConfigDir, s3_config.CircuitBreakerConfigFile)
if err != nil {
glog.Warningf("load s3 circuit breaker config from filer: %v", err)
} else {
glog.V(2).Infof("load s3 circuit breaker config complete: %v", cb)
return fmt.Errorf("read S3 circuit breaker config: %v", err)
}
return err
return cb.LoadS3ApiConfigurationFromBytes(content)
})
if err != nil {
glog.Warningf("fail to load config: %v", err)
}
return cb
}
@@ -47,13 +49,13 @@ func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error
glog.Warningf("unmarshal error: %v", err)
return fmt.Errorf("unmarshal error: %v", err)
}
if err := cb.loadCbCfg(cbCfg); err != nil {
if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil {
return err
}
return nil
}
func (cb *CircuitBreaker) loadCbCfg(cfg *s3_pb.S3CircuitBreakerConfig) error {
func (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error {
//global
globalEnabled := false
@@ -71,7 +73,7 @@ func (cb *CircuitBreaker) loadCbCfg(cfg *s3_pb.S3CircuitBreakerConfig) error {
for bucket, cbOptions := range cfg.Buckets {
if cbOptions.Enabled {
for action, limit := range cbOptions.Actions {
limitations[config.Concat(bucket, action)] = limit
limitations[s3_config.Concat(bucket, action)] = limit
}
}
}
@@ -80,7 +82,7 @@ func (cb *CircuitBreaker) loadCbCfg(cfg *s3_pb.S3CircuitBreakerConfig) error {
return nil
}
func (cb *CircuitBreaker) Check(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {
func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) {
return func(w http.ResponseWriter, r *http.Request) {
if !cb.Enabled {
f(w, r)
@@ -90,7 +92,7 @@ func (cb *CircuitBreaker) Check(f func(w http.ResponseWriter, r *http.Request),
vars := mux.Vars(r)
bucket := vars["bucket"]
rollback, errCode := cb.check(r, bucket, action)
rollback, errCode := cb.limit(r, bucket, action)
defer func() {
for _, rf := range rollback {
rf()
@@ -105,10 +107,10 @@ func (cb *CircuitBreaker) Check(f func(w http.ResponseWriter, r *http.Request),
}, Action(action)
}
func (cb *CircuitBreaker) check(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {
func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) {
//bucket simultaneous request count
bucketCountRollBack, errCode := cb.loadAndCompare(bucket, action, config.LimitTypeCount, 1, s3err.ErrTooManyRequest)
bucketCountRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_config.LimitTypeCount, 1, s3err.ErrTooManyRequest)
if bucketCountRollBack != nil {
rollback = append(rollback, bucketCountRollBack)
}
@@ -117,7 +119,7 @@ func (cb *CircuitBreaker) check(r *http.Request, bucket string, action string) (
}
//bucket simultaneous request content bytes
bucketContentLengthRollBack, errCode := cb.loadAndCompare(bucket, action, config.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(bucket, action, s3_config.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
if bucketContentLengthRollBack != nil {
rollback = append(rollback, bucketContentLengthRollBack)
}
@@ -126,7 +128,7 @@ func (cb *CircuitBreaker) check(r *http.Request, bucket string, action string) (
}
//global simultaneous request count
globalCountRollBack, errCode := cb.loadAndCompare("", action, config.LimitTypeCount, 1, s3err.ErrTooManyRequest)
globalCountRollBack, errCode := cb.loadCounterAndCompare("", action, s3_config.LimitTypeCount, 1, s3err.ErrTooManyRequest)
if globalCountRollBack != nil {
rollback = append(rollback, globalCountRollBack)
}
@@ -135,7 +137,7 @@ func (cb *CircuitBreaker) check(r *http.Request, bucket string, action string) (
}
//global simultaneous request content bytes
globalContentLengthRollBack, errCode := cb.loadAndCompare("", action, config.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
globalContentLengthRollBack, errCode := cb.loadCounterAndCompare("", action, s3_config.LimitTypeBytes, r.ContentLength, s3err.ErrRequestBytesExceed)
if globalContentLengthRollBack != nil {
rollback = append(rollback, globalContentLengthRollBack)
}
@@ -145,14 +147,19 @@ func (cb *CircuitBreaker) check(r *http.Request, bucket string, action string) (
return
}
func (cb CircuitBreaker) loadAndCompare(bucket, action, limitType string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {
key := config.Concat(bucket, action, limitType)
func (cb *CircuitBreaker) loadCounterAndCompare(bucket, action, limitType string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) {
key := s3_config.Concat(bucket, action, limitType)
e = s3err.ErrNone
if max, ok := cb.limitations[key]; ok {
counter, exists := cb.counters[key]
if !exists {
counter = atomic.NewInt64(0)
cb.counters[key] = counter
cb.Lock()
counter, exists = cb.counters[key]
if !exists {
counter = atomic.NewInt64(0)
cb.counters[key] = counter
}
cb.Unlock()
}
current := counter.Load()
if current+inc > max {
@@ -164,7 +171,7 @@ func (cb CircuitBreaker) loadAndCompare(bucket, action, limitType string, inc in
counter.Sub(inc)
}
current = counter.Load()
if current+inc > max {
if current > max {
e = errCode
return
}


@@ -0,0 +1,97 @@
package s3api
import (
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_config"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
"go.uber.org/atomic"
"net/http"
"sync"
"testing"
)
type TestLimitCase struct {
actionName string
limitType string
bucketLimitValue int64
globalLimitValue int64
routineCount int
reqBytes int64
successCount int64
}
var (
bucket = "/test"
action = s3_constants.ACTION_READ
TestLimitCases = []*TestLimitCase{
{action, s3_config.LimitTypeCount, 5, 5, 6, 1024, 5},
{action, s3_config.LimitTypeCount, 6, 6, 6, 1024, 6},
{action, s3_config.LimitTypeCount, 5, 6, 6, 1024, 5},
{action, s3_config.LimitTypeBytes, 1024, 1024, 6, 200, 5},
{action, s3_config.LimitTypeBytes, 1200, 1200, 6, 200, 6},
{action, s3_config.LimitTypeBytes, 11990, 11990, 60, 200, 59},
{action, s3_config.LimitTypeBytes, 11790, 11990, 60, 200, 58},
}
)
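Each case pairs a per-bucket limit and a global limit with the number of concurrent requests, the request size, and the expected number of successes. For example, {action, s3_config.LimitTypeBytes, 1024, 1024, 6, 200, 5} launches six concurrent 200-byte reads against a 1024-byte limit: only five can hold the counter at once (5*200 = 1000 stays within 1024, while a sixth admission would push it to 1200), so successCount is expected to be 5.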
func TestLimit(t *testing.T) {
for _, tc := range TestLimitCases {
circuitBreakerConfig := &s3_pb.S3CircuitBreakerConfig{
Global: &s3_pb.CbOptions{
Enabled: true,
Actions: map[string]int64{
s3_config.Concat(tc.actionName, tc.limitType): tc.globalLimitValue,
},
},
Buckets: map[string]*s3_pb.CbOptions{
bucket: {
Enabled: true,
Actions: map[string]int64{
s3_config.Concat(tc.actionName, tc.limitType): tc.bucketLimitValue,
},
},
},
}
circuitBreaker := &CircuitBreaker{
counters: make(map[string]*atomic.Int64),
limitations: make(map[string]int64),
}
err := circuitBreaker.loadCircuitBreakerConfig(circuitBreakerConfig)
if err != nil {
t.Fatal(err)
}
successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: tc.reqBytes})
if successCount != tc.successCount {
t.Errorf("successCount not equal, expect=%d, actual=%d", tc.successCount, successCount)
}
}
}
func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request) int64 {
var successCounter atomic.Int64
resultCh := make(chan []func(), routineCount)
var wg sync.WaitGroup
for i := 0; i < routineCount; i++ {
wg.Add(1)
go func() {
defer wg.Done()
rollbackFn, errCode := circuitBreaker.limit(r, bucket, action)
if errCode == s3err.ErrNone {
successCounter.Inc()
}
resultCh <- rollbackFn
}()
}
wg.Wait()
close(resultCh)
for fns := range resultCh {
for _, fn := range fns {
fn()
}
}
return successCounter.Load()
}


@@ -3,6 +3,7 @@ package s3api
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
"net"
"net/http"
@@ -74,7 +75,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer
s3ApiServer.registerRouter(router)
go s3ApiServer.subscribeMetaEvents("s3", "/etc", time.Now().UnixNano())
go s3ApiServer.subscribeMetaEvents("s3", filer.DirectoryEtcRoot, time.Now().UnixNano())
return s3ApiServer, nil
}
@@ -108,115 +109,115 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) {
// objects with query
// CopyObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}")
// ListMultipartUploads
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "")
// GetObjectTagging
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "")
// PutObjectTagging
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "")
// PutObjectACL
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
// PutObjectRetention
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "")
// PutObjectLockConfiguration
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "")
// GetObjectACL
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetObjectAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
// objects with query
// raw objects
// HeadObject
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.HeadObjectHandler, ACTION_READ)), "GET"))
bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET"))
// GetObject, but directory listing is not supported
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetObjectHandler, ACTION_READ)), "GET"))
bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET"))
// CopyObject
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY"))
bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY"))
// PutObject
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutObjectHandler, ACTION_WRITE)), "PUT"))
bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT"))
// DeleteObject
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE"))
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE"))
// raw objects
// buckets with query
// DeleteMultipleObjects
bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")
bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "")
// GetBucketACL
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "")
// PutBucketACL
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "")
// GetBucketPolicy
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "")
// PutBucketPolicy
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "")
// DeleteBucketPolicy
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "")
// GetBucketCors
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "")
// PutBucketCors
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "")
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "")
// DeleteBucketCors
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "")
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "")
// GetBucketLifecycleConfiguration
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "")
// PutBucketLifecycleConfiguration
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "")
bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "")
// DeleteBucketLifecycleConfiguration
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "")
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "")
// GetBucketLocation
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "")
// GetBucketRequestPayment
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "")
// ListObjectsV2
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2")
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2")
// buckets with query
// raw buckets
// PostPolicy
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST"))
bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST"))
// HeadBucket
bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.HeadBucketHandler, ACTION_READ)), "GET"))
bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET"))
// PutBucket
bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT"))
// DeleteBucket
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.DeleteBucketHandler, ACTION_WRITE)), "DELETE"))
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_WRITE)), "DELETE"))
// ListObjectsV1 (Legacy)
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Check(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST"))
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST"))
// raw buckets


@@ -5,9 +5,9 @@ import (
"flag"
"fmt"
"github.com/alecthomas/units"
"github.com/chrislusf/seaweedfs/weed/config"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
"github.com/chrislusf/seaweedfs/weed/s3api/s3_config"
"io"
"strconv"
"strings"
@@ -15,6 +15,8 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
var LoadConfig = loadConfig
func init() {
Commands = append(Commands, &commandS3CircuitBreaker{})
}
@@ -23,7 +25,7 @@ type commandS3CircuitBreaker struct {
}
func (c *commandS3CircuitBreaker) Name() string {
return "s3.circuit.breaker"
return "s3.circuitBreaker"
}
func (c *commandS3CircuitBreaker) Help() string {
@@ -31,42 +33,42 @@ func (c *commandS3CircuitBreaker) Help() string {
# examples
# add
s3.circuit.breaker -actions Read,Write -values 500,200 -global -enable -apply -type count
s3.circuit.breaker -actions Write -values 200MiB -global -enable -apply -type bytes
s3.circuit.breaker -actions Write -values 200MiB -bucket x,y,z -enable -apply -type bytes
s3.circuitBreaker -actions Read,Write -values 500,200 -global -enable -apply -type count
s3.circuitBreaker -actions Write -values 200MiB -global -enable -apply -type bytes
s3.circuitBreaker -actions Write -values 200MiB -bucket x,y,z -enable -apply -type bytes
#delete
s3.circuit.breaker -actions Write -bucket x,y,z -delete -apply -type bytes
s3.circuit.breaker -actions Write -bucket x,y,z -delete -apply
s3.circuit.breaker -actions Write -delete -apply
s3.circuitBreaker -actions Write -bucket x,y,z -delete -apply -type bytes
s3.circuitBreaker -actions Write -bucket x,y,z -delete -apply
s3.circuitBreaker -actions Write -delete -apply
`
}
func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
dir := config.CircuitBreakerConfigDir
file := config.CircuitBreakerConfigFile
dir := s3_config.CircuitBreakerConfigDir
file := s3_config.CircuitBreakerConfigFile
s3CircuitBreakerCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
buckets := s3CircuitBreakerCommand.String("buckets", "", "comma separated buckets names")
global := s3CircuitBreakerCommand.Bool("global", false, "comma separated buckets names")
buckets := s3CircuitBreakerCommand.String("buckets", "", "the bucket name(s) to configure, eg: -buckets x,y,z")
global := s3CircuitBreakerCommand.Bool("global", false, "configure global circuit breaker")
actions := s3CircuitBreakerCommand.String("actions", "", "comma separated actions names: Read,Write,List,Tagging,Admin")
limitType := s3CircuitBreakerCommand.String("type", "", "count|bytes simultaneous requests count")
values := s3CircuitBreakerCommand.String("values", "", "comma separated max values,Maximum number of simultaneous requests content length, support byte unit: eg: 1k, 10m, 1g")
enabled := s3CircuitBreakerCommand.Bool("enable", true, "enable or disable circuit breaker")
deleted := s3CircuitBreakerCommand.Bool("delete", false, "delete users, actions or access keys")
disabled := s3CircuitBreakerCommand.Bool("disable", false, "disable global or buckets circuit breaker")
deleted := s3CircuitBreakerCommand.Bool("delete", false, "delete circuit breaker config")
apply := s3CircuitBreakerCommand.Bool("apply", false, "update and apply current configuration")
if err = s3CircuitBreakerCommand.Parse(args); err != nil {
return nil
}
var buf bytes.Buffer
if err = commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return filer.ReadEntry(commandEnv.MasterClient, client, dir, file, &buf)
}); err != nil && err != filer_pb.ErrNotFound {
err = LoadConfig(commandEnv, dir, file, &buf)
if err != nil {
return err
}
@@ -90,7 +92,7 @@ func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writ
deleteGlobalActions(cbCfg, cmdActions, limitType)
if cbCfg.Buckets != nil {
var allBuckets []string
for bucket, _ := range cbCfg.Buckets {
for bucket := range cbCfg.Buckets {
allBuckets = append(allBuckets, bucket)
}
deleteBucketsActions(allBuckets, cbCfg, cmdActions, limitType)
@@ -108,7 +110,7 @@ func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writ
}
}
} else {
cmdBuckets, cmdActions, cmdValues, err := c.initActionsAndValues(buckets, actions, limitType, values, false)
cmdBuckets, cmdActions, cmdValues, err := c.initActionsAndValues(buckets, actions, limitType, values, *disabled)
if err != nil {
return err
}
@@ -125,7 +127,7 @@ func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writ
cbOptions = &s3_pb.CbOptions{}
cbCfg.Buckets[bucket] = cbOptions
}
cbOptions.Enabled = *enabled
cbOptions.Enabled = !*disabled
if len(cmdActions) > 0 {
err = insertOrUpdateValues(cbOptions, cmdActions, cmdValues, limitType)
@@ -146,7 +148,7 @@ func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writ
globalOptions = &s3_pb.CbOptions{Actions: make(map[string]int64, len(cmdActions))}
cbCfg.Global = globalOptions
}
globalOptions.Enabled = *enabled
globalOptions.Enabled = !*disabled
if len(cmdActions) > 0 {
err = insertOrUpdateValues(globalOptions, cmdActions, cmdValues, limitType)
@@ -167,8 +169,8 @@ func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writ
return err
}
fmt.Fprintf(writer, string(buf.Bytes()))
fmt.Fprintln(writer)
_, _ = fmt.Fprintf(writer, string(buf.Bytes()))
_, _ = fmt.Fprintln(writer)
if *apply {
if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
@@ -176,12 +178,20 @@ func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writ
}); err != nil {
return err
}
}
return nil
}
func loadConfig(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error {
if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
return filer.ReadEntry(commandEnv.MasterClient, client, dir, file, buf)
}); err != nil && err != filer_pb.ErrNotFound {
return err
}
return nil
}
func insertOrUpdateValues(cbOptions *s3_pb.CbOptions, cmdActions []string, cmdValues []int64, limitType *string) error {
if len(*limitType) == 0 {
return fmt.Errorf("type not valid, only 'count' and 'bytes' are allowed")
@@ -193,7 +203,7 @@ func insertOrUpdateValues(cbOptions *s3_pb.CbOptions, cmdActions []string, cmdVa
if len(cmdValues) > 0 {
for i, action := range cmdActions {
cbOptions.Actions[config.Concat(action, *limitType)] = cmdValues[i]
cbOptions.Actions[s3_config.Concat(action, *limitType)] = cmdValues[i]
}
}
return nil
@@ -213,7 +223,7 @@ func deleteBucketsActions(cmdBuckets []string, cbCfg *s3_pb.S3CircuitBreakerConf
if cbOption, ok := cbCfg.Buckets[bucket]; ok {
if len(cmdActions) > 0 && cbOption.Actions != nil {
for _, action := range cmdActions {
delete(cbOption.Actions, config.Concat(action, *limitType))
delete(cbOption.Actions, s3_config.Concat(action, *limitType))
}
}
@@ -240,7 +250,7 @@ func deleteGlobalActions(cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []strin
return
} else {
for _, action := range cmdActions {
delete(globalOptions.Actions, config.Concat(action, *limitType))
delete(globalOptions.Actions, s3_config.Concat(action, *limitType))
}
}
@@ -249,7 +259,7 @@ func deleteGlobalActions(cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []strin
}
}
func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitType, values *string, deleteOp bool) (cmdBuckets, cmdActions []string, cmdValues []int64, err error) {
func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitType, values *string, parseValues bool) (cmdBuckets, cmdActions []string, cmdValues []int64, err error) {
if len(*buckets) > 0 {
cmdBuckets = strings.Split(*buckets, ",")
}
@@ -260,27 +270,27 @@ func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitTy
//check action valid
for _, action := range cmdActions {
var found bool
for _, allowedAction := range config.AllowedActions {
for _, allowedAction := range s3_config.AllowedActions {
if allowedAction == action {
found = true
}
}
if !found {
return nil, nil, nil, fmt.Errorf("value(%s) of flag[-action] not valid, allowed actions: %v", *actions, config.AllowedActions)
return nil, nil, nil, fmt.Errorf("value(%s) of flag[-action] not valid, allowed actions: %v", *actions, s3_config.AllowedActions)
}
}
}
if !deleteOp {
if !parseValues {
if len(cmdActions) < 0 {
for _, action := range config.AllowedActions {
for _, action := range s3_config.AllowedActions {
cmdActions = append(cmdActions, action)
}
}
if len(*limitType) > 0 {
switch *limitType {
case config.LimitTypeCount:
case s3_config.LimitTypeCount:
elements := strings.Split(*values, ",")
if len(cmdActions) != len(elements) {
if len(elements) != 1 || len(elements) == 0 {
@@ -288,7 +298,7 @@ func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitTy
}
v, err := strconv.Atoi(elements[0])
if err != nil {
return nil, nil, nil, fmt.Errorf("value of -counts must be a legal number(s)")
return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)")
}
for range cmdActions {
cmdValues = append(cmdValues, int64(v))
@@ -297,16 +307,16 @@ func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitTy
for _, value := range elements {
v, err := strconv.Atoi(value)
if err != nil {
return nil, nil, nil, fmt.Errorf("value of -counts must be a legal number(s)")
return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)")
}
cmdValues = append(cmdValues, int64(v))
}
}
case config.LimitTypeBytes:
case s3_config.LimitTypeBytes:
elements := strings.Split(*values, ",")
if len(cmdActions) != len(elements) {
if len(elements) != 1 || len(elements) == 0 {
return nil, nil, nil, fmt.Errorf("count of flag[-actions] and flag[-counts] not equal")
return nil, nil, nil, fmt.Errorf("values count of -actions and -values not equal")
}
v, err := units.ParseStrictBytes(elements[0])
if err != nil {


@@ -1,7 +1,74 @@
package shell
import "testing"
import (
"bytes"
"strings"
"testing"
)
type Case struct {
args []string
result string
}
var (
TestCases = []*Case{
//add circuit breaker config for global
{
args: strings.Split("-global -type count -actions Read,Write -values 500,200", " "),
result: "{\n \"global\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"500\",\n \"Write:count\": \"200\"\n }\n }\n}\n",
},
//disable global config
{
args: strings.Split("-global -disable", " "),
result: "{\n \"global\": {\n \"actions\": {\n \"Read:count\": \"500\",\n \"Write:count\": \"200\"\n }\n }\n}\n",
},
//add circuit breaker config for buckets x,y,z
{
args: strings.Split("-buckets x,y,z -type count -actions Read,Write -values 200,100", " "),
result: "{\n \"global\": {\n \"actions\": {\n \"Read:count\": \"500\",\n \"Write:count\": \"200\"\n }\n },\n \"buckets\": {\n \"x\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n },\n \"y\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n },\n \"z\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n }\n }\n}\n",
},
//disable circuit breaker config of x
{
args: strings.Split("-buckets x -disable", " "),
result: "{\n \"global\": {\n \"actions\": {\n \"Read:count\": \"500\",\n \"Write:count\": \"200\"\n }\n },\n \"buckets\": {\n \"x\": {\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n },\n \"y\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n },\n \"z\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n }\n }\n}\n",
},
//delete circuit breaker config of x
{
args: strings.Split("-buckets x -delete", " "),
result: "{\n \"global\": {\n \"actions\": {\n \"Read:count\": \"500\",\n \"Write:count\": \"200\"\n }\n },\n \"buckets\": {\n \"y\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n },\n \"z\": {\n \"enabled\": true,\n \"actions\": {\n \"Read:count\": \"200\",\n \"Write:count\": \"100\"\n }\n }\n }\n}\n",
},
//clear all circuit breaker config
{
args: strings.Split("-delete", " "),
result: "{\n\n}\n",
},
}
)
func TestCircuitBreakerShell(t *testing.T) {
var writeBuf bytes.Buffer
cmd := &commandS3CircuitBreaker{}
LoadConfig = func(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error {
_, err := buf.Write(writeBuf.Bytes())
if err != nil {
return err
}
writeBuf.Reset()
return nil
}
for i, tc := range TestCases {
err := cmd.Do(tc.args, nil, &writeBuf)
if err != nil {
t.Fatal(err)
}
if i != 0 {
result := writeBuf.String()
if result != tc.result {
t.Fatal("result of s3 circuit breaker shell command is unexpect!")
}
}
}
}