diff --git a/.github/workflows/depsreview.yml b/.github/workflows/depsreview.yml index 71619ae1b..b84b27d15 100644 --- a/.github/workflows/depsreview.yml +++ b/.github/workflows/depsreview.yml @@ -11,4 +11,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@dcd71f646680f2efd8db4afa5ad64fdcba30e748 - name: 'Dependency Review' - uses: actions/dependency-review-action@a9c83d3af6b9031e20feba03b904645bb23d1dab + uses: actions/dependency-review-action@1c59cdf2a9c7f29c90e8da32237eb04b81bad9f0 diff --git a/go.mod b/go.mod index 6ae8cca19..e45478d7f 100644 --- a/go.mod +++ b/go.mod @@ -3,14 +3,14 @@ module github.com/chrislusf/seaweedfs go 1.18 require ( - cloud.google.com/go v0.100.2 // indirect - cloud.google.com/go/pubsub v1.21.1 + cloud.google.com/go v0.102.0 // indirect + cloud.google.com/go/pubsub v1.22.2 cloud.google.com/go/storage v1.22.1 github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-storage-blob-go v0.15.0 github.com/OneOfOne/xxhash v1.2.8 github.com/Shopify/sarama v1.34.1 - github.com/aws/aws-sdk-go v1.44.32 + github.com/aws/aws-sdk-go v1.44.37 github.com/beorn7/perks v1.0.1 // indirect github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 github.com/bwmarrin/snowflake v0.3.0 @@ -45,7 +45,7 @@ require ( github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.1.1 + github.com/google/btree v1.1.2 github.com/google/go-cmp v0.5.8 // indirect github.com/google/uuid v1.3.0 github.com/google/wire v0.5.0 // indirect @@ -98,7 +98,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/viper v1.12.0 github.com/streadway/amqp v1.0.0 - github.com/stretchr/testify v1.7.2 + github.com/stretchr/testify v1.7.3 github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 github.com/tidwall/gjson v1.14.1 @@ -154,7 +154,7 @@ require ( github.com/hashicorp/raft v1.3.9 github.com/hashicorp/raft-boltdb v0.0.0-20220329195025-15018e9b97e0 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 - github.com/ydb-platform/ydb-go-sdk/v3 v3.26.10 + github.com/ydb-platform/ydb-go-sdk/v3 v3.27.0 ) require ( diff --git a/go.sum b/go.sum index c9728bedd..e8ffdb1ca 100644 --- a/go.sum +++ b/go.sum @@ -30,8 +30,9 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= -cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y= cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0 h1:DAq3r8y4mDgyB/ZPJ9v/5VJNqjgJAxTn6ZYLlUywOu8= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -62,8 +63,8 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod 
h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/pubsub v1.19.0/go.mod h1:/O9kmSe9bb9KRnIAWkzmqhPjHo6LtzGOBYd/kr06XSs= -cloud.google.com/go/pubsub v1.21.1 h1:ghu6wlm6WouITmmuwkxGG+6vNRXDaPdAjqLcRdsw3EQ= -cloud.google.com/go/pubsub v1.21.1/go.mod h1:u3XGeMBOBCIQLcxNzy14Svz88ZFS8vI250uDgIAQDSQ= +cloud.google.com/go/pubsub v1.22.2 h1:e6A4rhtMX4opff/jDWApl4HwLtsCdV9VULVfpFRp6eo= +cloud.google.com/go/pubsub v1.22.2/go.mod h1:LBHGrtgM7+SGKCDKQu2pKIRtGwbZyJvRDkMk0594xdU= cloud.google.com/go/secretmanager v1.3.0/go.mod h1:+oLTkouyiYiabAQNugCeTS3PAArGiMJuBqvJnJsyH+U= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= @@ -150,8 +151,8 @@ github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= -github.com/aws/aws-sdk-go v1.44.32 h1:x5hBtpY/02sgRL158zzTclcCLwh3dx3YlSl1rAH4Op0= -github.com/aws/aws-sdk-go v1.44.32/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.37 h1:KvDxCX6dfJeEDC77U5GPGSP0ErecmNnhDHFxw+NIvlI= +github.com/aws/aws-sdk-go v1.44.37/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= github.com/aws/aws-sdk-go-v2 v1.16.2 h1:fqlCk6Iy3bnCumtrLz9r3mJ/2gUT0pJ0wLFVIdWh+JA= github.com/aws/aws-sdk-go-v2 v1.16.2/go.mod h1:ytwTPBG6fXTZLxxeeCCWj2/EMYp/xDUgX+OET6TLNNU= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.1/go.mod h1:n8Bs1ElDD2wJ9kCRTczA83gYbBmjSwZp3umc6zF4EeM= @@ -401,8 +402,8 @@ github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.1 h1:OMJCfqwmbcwNihVCadalGMZiHclz5T0mRv12gnIaV0Q= -github.com/google/btree v1.1.1/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -820,8 +821,9 @@ github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo= github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0 h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -829,8 +831,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.7.3 h1:dAm0YRdRQlWojc3CrCRgPBzG5f941d0zvAKu7qY4e+I= +github.com/stretchr/testify v1.7.3/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203 h1:QVqDTf3h2WHt08YuiTGPZLls0Wq99X9bWd0Q5ZSBesM= github.com/stvp/tempredis v0.0.0-20181119212430-b82af8480203/go.mod h1:oqN97ltKNihBbwlX8dLpwxCl3+HnXKV/R0e+sRLd9C8= github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= @@ -880,8 +882,8 @@ github.com/ydb-platform/ydb-go-genproto v0.0.0-20220531094121-36ca6bddb9f7/go.mo github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2 h1:EYSI1kulnHb0H0zt3yOw4cRj4ABMSMGwNe43D+fX7e4= github.com/ydb-platform/ydb-go-sdk-auth-environ v0.1.2/go.mod h1:Xfjce+VMU9yJVr1lj60yK2fFPWjB4jr/4cp3K7cjzi4= github.com/ydb-platform/ydb-go-sdk/v3 v3.25.3/go.mod h1:PFizF/vJsdAgEwjK3DVSBD52kdmRkWfSIS2q2pA+e88= -github.com/ydb-platform/ydb-go-sdk/v3 v3.26.10 h1:2M6L2NX8l103qNDM1i97gh81Y2EJEAEyON3/N5zQV14= -github.com/ydb-platform/ydb-go-sdk/v3 v3.26.10/go.mod h1:1U2nGytADgY4/U7OykO3LfzeS3/zz4zqg7wlUA7XHfc= +github.com/ydb-platform/ydb-go-sdk/v3 v3.27.0 h1:paSdC12yRI19Vv9ej6qAjUu2/r/WaW/rzsUogd+lg34= +github.com/ydb-platform/ydb-go-sdk/v3 v3.27.0/go.mod h1:vXjmbeEAWlkVE5/ym3XHhtnWk7aDGGqFMKrfgwbRUkQ= github.com/ydb-platform/ydb-go-yc v0.8.3 h1:92UUUMsfvtMl6mho8eQ9lbkiPrF3a9CT+RrVRAKNRwo= github.com/ydb-platform/ydb-go-yc v0.8.3/go.mod h1:zUolAFGzJ5XG8uwiseTLr9Lapm7L7hdVdZgLSuv9FXE= github.com/ydb-platform/ydb-go-yc-metadata v0.5.2 h1:nMtixUijP0Z7iHJNT9fOL+dbmEzZxqU6Xk87ll7hqXg= @@ -1103,6 +1105,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1198,6 +1201,7 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220330033206-e17cdc41300f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1340,8 +1344,9 @@ google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/S google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= -google.golang.org/api v0.76.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko= google.golang.org/api v0.83.0 h1:pMvST+6v+46Gabac4zlJlalxZjCeRcepwg2EdBU+nCc= google.golang.org/api v0.83.0/go.mod h1:CNywQoj/AfhTw26ZWAa6LwOv+6WFxHmeLPZq2uncLZk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1442,10 +1447,11 @@ google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= -google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8 h1:qRu95HZ148xXw+XeZ3dvqe85PxH4X8+jIo0iRPKcEnM= google.golang.org/genproto v0.0.0-20220602131408-e326c6e8e9c8/go.mod h1:yKyY4AMRwFiC8yMMNaMi+RkCnjZJt9LoWuvhXjMs+To= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1523,8 +1529,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20220512140231-539c8e751b99/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/weed/command/update.go b/weed/command/update.go index 8e0a76016..2d0dc42ad 100644 --- a/weed/command/update.go +++ b/weed/command/update.go @@ -75,7 +75,7 @@ func init() { } var cmdUpdate = &Command{ - UsageLine: "weed update -dir=/path/to/dir -name=name -version=x.xx", + UsageLine: "update [-dir=/path/to/dir] [-name=name] [-version=x.xx]", Short: "get latest or specific version from https://github.com/chrislusf/seaweedfs", Long: `get latest or specific version from https://github.com/chrislusf/seaweedfs`, } diff --git a/weed/filer/filechunk_manifest.go b/weed/filer/filechunk_manifest.go index 091bbee5a..4eb657dfa 100644 --- a/weed/filer/filechunk_manifest.go +++ b/weed/filer/filechunk_manifest.go @@ -3,7 +3,6 @@ package filer import ( "bytes" "fmt" - "github.com/chrislusf/seaweedfs/weed/wdclient" "io" "math" "net/url" @@ -11,6 +10,8 @@ import ( "sync" "time" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "github.com/golang/protobuf/proto" "github.com/chrislusf/seaweedfs/weed/glog" @@ -63,14 +64,14 @@ func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chun resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk) if err != nil { - return chunks, nil, err + return dataChunks, nil, err } manifestChunks = append(manifestChunks, chunk) // recursive subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset) if subErr != nil { - return chunks, nil, subErr + return dataChunks, nil, subErr } dataChunks = append(dataChunks, subDataChunks...) manifestChunks = append(manifestChunks, subManifestChunks...) 
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go index 208ef8095..48b344bf8 100644 --- a/weed/filer/filechunks.go +++ b/weed/filer/filechunks.go @@ -3,11 +3,12 @@ package filer import ( "bytes" "fmt" - "github.com/chrislusf/seaweedfs/weed/wdclient" - "golang.org/x/exp/slices" "math" "sync" + "github.com/chrislusf/seaweedfs/weed/wdclient" + "golang.org/x/exp/slices" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -248,6 +249,9 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles []VisibleInterval, err error) { chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset) + if err != nil { + return + } visibles2 := readResolvedChunks(chunks) diff --git a/weed/filer/s3iam_conf.go b/weed/filer/s3iam_conf.go index 55c976915..891bf925b 100644 --- a/weed/filer/s3iam_conf.go +++ b/weed/filer/s3iam_conf.go @@ -2,13 +2,12 @@ package filer import ( "bytes" - "github.com/chrislusf/seaweedfs/weed/pb/iam_pb" "github.com/golang/protobuf/jsonpb" "github.com/golang/protobuf/proto" "io" ) -func ParseS3ConfigurationFromBytes(content []byte, config *iam_pb.S3ApiConfiguration) error { +func ParseS3ConfigurationFromBytes[T proto.Message](content []byte, config T) error { if err := jsonpb.Unmarshal(bytes.NewBuffer(content), config); err != nil { return err } diff --git a/weed/pb/s3.proto b/weed/pb/s3.proto index 4f129b817..45a877fac 100644 --- a/weed/pb/s3.proto +++ b/weed/pb/s3.proto @@ -23,3 +23,13 @@ message S3ConfigureRequest { message S3ConfigureResponse { } + +message S3CircuitBreakerConfig { + S3CircuitBreakerOptions global=1; + map<string, S3CircuitBreakerOptions> buckets= 2; +} + +message S3CircuitBreakerOptions { + bool enabled=1; + map<string, int64> actions = 2; +} diff --git a/weed/pb/s3_pb/s3.pb.go b/weed/pb/s3_pb/s3.pb.go index 53f174f02..c1bd23556 100644 --- a/weed/pb/s3_pb/s3.pb.go +++ b/weed/pb/s3_pb/s3.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT.
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.17.3 +// protoc-gen-go v1.28.0 +// protoc v3.21.1 // source: s3.proto package s3_pb @@ -105,6 +105,116 @@ func (*S3ConfigureResponse) Descriptor() ([]byte, []int) { return file_s3_proto_rawDescGZIP(), []int{1} } +type S3CircuitBreakerConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Global *S3CircuitBreakerOptions `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"` + Buckets map[string]*S3CircuitBreakerOptions `protobuf:"bytes,2,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *S3CircuitBreakerConfig) Reset() { + *x = S3CircuitBreakerConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_s3_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S3CircuitBreakerConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3CircuitBreakerConfig) ProtoMessage() {} + +func (x *S3CircuitBreakerConfig) ProtoReflect() protoreflect.Message { + mi := &file_s3_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3CircuitBreakerConfig.ProtoReflect.Descriptor instead. +func (*S3CircuitBreakerConfig) Descriptor() ([]byte, []int) { + return file_s3_proto_rawDescGZIP(), []int{2} +} + +func (x *S3CircuitBreakerConfig) GetGlobal() *S3CircuitBreakerOptions { + if x != nil { + return x.Global + } + return nil +} + +func (x *S3CircuitBreakerConfig) GetBuckets() map[string]*S3CircuitBreakerOptions { + if x != nil { + return x.Buckets + } + return nil +} + +type S3CircuitBreakerOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Actions map[string]int64 `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` +} + +func (x *S3CircuitBreakerOptions) Reset() { + *x = S3CircuitBreakerOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_s3_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *S3CircuitBreakerOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3CircuitBreakerOptions) ProtoMessage() {} + +func (x *S3CircuitBreakerOptions) ProtoReflect() protoreflect.Message { + mi := &file_s3_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3CircuitBreakerOptions.ProtoReflect.Descriptor instead. 
+func (*S3CircuitBreakerOptions) Descriptor() ([]byte, []int) { + return file_s3_proto_rawDescGZIP(), []int{3} +} + +func (x *S3CircuitBreakerOptions) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *S3CircuitBreakerOptions) GetActions() map[string]int64 { + if x != nil { + return x.Actions + } + return nil +} + var File_s3_proto protoreflect.FileDescriptor var file_s3_proto_rawDesc = []byte{ @@ -116,18 +226,47 @@ var file_s3_proto_rawDesc = []byte{ 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1a, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77, - 0x65, 0x65, 0x64, 0x53, 0x33, 0x12, 0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, - 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, - 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61, - 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53, - 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, - 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73, - 0x33, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x16, 0x53, 0x33, 0x43, + 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, + 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62, + 0x61, 0x6c, 0x12, 0x4b, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, + 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, + 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x1a, + 0x61, 0x0a, 0x0c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, + 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xbd, 0x01, 0x0a, 0x17, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, + 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18, + 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, + 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x53, 0x33, 0x12, + 0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, + 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, + 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, + 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x33, 0x5f, 0x70, 0x62, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -142,19 +281,27 @@ func file_s3_proto_rawDescGZIP() []byte { return file_s3_proto_rawDescData } -var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_s3_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_s3_proto_goTypes = []interface{}{ - (*S3ConfigureRequest)(nil), // 0: messaging_pb.S3ConfigureRequest - (*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse + (*S3ConfigureRequest)(nil), // 0: messaging_pb.S3ConfigureRequest + (*S3ConfigureResponse)(nil), // 1: messaging_pb.S3ConfigureResponse + (*S3CircuitBreakerConfig)(nil), // 2: messaging_pb.S3CircuitBreakerConfig + (*S3CircuitBreakerOptions)(nil), // 3: messaging_pb.S3CircuitBreakerOptions + nil, // 4: messaging_pb.S3CircuitBreakerConfig.BucketsEntry + nil, // 5: messaging_pb.S3CircuitBreakerOptions.ActionsEntry } var file_s3_proto_depIdxs = []int32{ - 0, // 0: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest - 1, // 1: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 
0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 3, // 0: messaging_pb.S3CircuitBreakerConfig.global:type_name -> messaging_pb.S3CircuitBreakerOptions + 4, // 1: messaging_pb.S3CircuitBreakerConfig.buckets:type_name -> messaging_pb.S3CircuitBreakerConfig.BucketsEntry + 5, // 2: messaging_pb.S3CircuitBreakerOptions.actions:type_name -> messaging_pb.S3CircuitBreakerOptions.ActionsEntry + 3, // 3: messaging_pb.S3CircuitBreakerConfig.BucketsEntry.value:type_name -> messaging_pb.S3CircuitBreakerOptions + 0, // 4: messaging_pb.SeaweedS3.Configure:input_type -> messaging_pb.S3ConfigureRequest + 1, // 5: messaging_pb.SeaweedS3.Configure:output_type -> messaging_pb.S3ConfigureResponse + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_s3_proto_init() } @@ -187,6 +334,30 @@ func file_s3_proto_init() { return nil } } + file_s3_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3CircuitBreakerConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_s3_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*S3CircuitBreakerOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } type x struct{} out := protoimpl.TypeBuilder{ @@ -194,7 +365,7 @@ func file_s3_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_s3_proto_rawDesc, NumEnums: 0, - NumMessages: 2, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, diff --git a/weed/s3api/auth_credentials_subscribe.go b/weed/s3api/auth_credentials_subscribe.go index 91fd5d830..f2bd94f56 100644 --- a/weed/s3api/auth_credentials_subscribe.go +++ b/weed/s3api/auth_credentials_subscribe.go @@ -5,6 +5,7 @@ import ( "github.com/chrislusf/seaweedfs/weed/glog" "github.com/chrislusf/seaweedfs/weed/pb" "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "github.com/chrislusf/seaweedfs/weed/util" ) @@ -22,12 +23,11 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la if message.NewParentPath != "" { dir = message.NewParentPath } - if dir == filer.IamConfigDirecotry && message.NewEntry.Name == filer.IamIdentityFile { - if err := s3a.iam.LoadS3ApiConfigurationFromBytes(message.NewEntry.Content); err != nil { - return err - } - glog.V(0).Infof("updated %s/%s", filer.IamConfigDirecotry, filer.IamIdentityFile) - } + fileName := message.NewEntry.Name + content := message.NewEntry.Content + + _ = s3a.onIamConfigUpdate(dir, fileName, content) + _ = s3a.onCircuitBreakerConfigUpdate(dir, fileName, content) return nil } @@ -38,5 +38,26 @@ func (s3a *S3ApiServer) subscribeMetaEvents(clientName string, prefix string, la glog.V(0).Infof("iam follow metadata changes: %v", err) return true }) - +} + +//reload iam config +func (s3a *S3ApiServer) onIamConfigUpdate(dir, filename string, content []byte) error { + if dir == filer.IamConfigDirecotry && filename == filer.IamIdentityFile { + if err := s3a.iam.LoadS3ApiConfigurationFromBytes(content); err != nil { + return err + } + glog.V(0).Infof("updated %s/%s", dir, filename) + } + return nil +} + 
+//reload circuit breaker config +func (s3a *S3ApiServer) onCircuitBreakerConfigUpdate(dir, filename string, content []byte) error { + if dir == s3_constants.CircuitBreakerConfigDir && filename == s3_constants.CircuitBreakerConfigFile { + if err := s3a.cb.LoadS3ApiConfigurationFromBytes(content); err != nil { + return err + } + glog.V(0).Infof("updated %s/%s", dir, filename) + } + return nil } diff --git a/weed/s3api/s3_constants/s3_config.go b/weed/s3api/s3_constants/s3_config.go new file mode 100644 index 000000000..0fa5b26f4 --- /dev/null +++ b/weed/s3api/s3_constants/s3_config.go @@ -0,0 +1,18 @@ +package s3_constants + +import ( + "strings" +) + +var ( + CircuitBreakerConfigDir = "/etc/s3" + CircuitBreakerConfigFile = "circuit_breaker.json" + AllowedActions = []string{ACTION_READ, ACTION_WRITE, ACTION_LIST, ACTION_TAGGING, ACTION_ADMIN} + LimitTypeCount = "Count" + LimitTypeBytes = "MB" + Separator = ":" +) + +func Concat(elements ...string) string { + return strings.Join(elements, Separator) +} diff --git a/weed/s3api/s3api_circuit_breaker.go b/weed/s3api/s3api_circuit_breaker.go new file mode 100644 index 000000000..111b404c7 --- /dev/null +++ b/weed/s3api/s3api_circuit_breaker.go @@ -0,0 +1,183 @@ +package s3api + +import ( + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/pb" + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "github.com/gorilla/mux" + "net/http" + "sync" + "sync/atomic" +) + +type CircuitBreaker struct { + sync.RWMutex + Enabled bool + counters map[string]*int64 + limitations map[string]int64 +} + +func NewCircuitBreaker(option *S3ApiServerOption) *CircuitBreaker { + cb := &CircuitBreaker{ + counters: make(map[string]*int64), + limitations: make(map[string]int64), + } + + err := pb.WithFilerClient(false, option.Filer, option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error { + content, err := filer.ReadInsideFiler(client, s3_constants.CircuitBreakerConfigDir, s3_constants.CircuitBreakerConfigFile) + if err != nil { + return fmt.Errorf("read S3 circuit breaker config: %v", err) + } + return cb.LoadS3ApiConfigurationFromBytes(content) + }) + + if err != nil { + glog.Warningf("fail to load config: %v", err) + } + + return cb +} + +func (cb *CircuitBreaker) LoadS3ApiConfigurationFromBytes(content []byte) error { + cbCfg := &s3_pb.S3CircuitBreakerConfig{} + if err := filer.ParseS3ConfigurationFromBytes(content, cbCfg); err != nil { + glog.Warningf("unmarshal error: %v", err) + return fmt.Errorf("unmarshal error: %v", err) + } + if err := cb.loadCircuitBreakerConfig(cbCfg); err != nil { + return err + } + return nil +} + +func (cb *CircuitBreaker) loadCircuitBreakerConfig(cfg *s3_pb.S3CircuitBreakerConfig) error { + + //global + globalEnabled := false + globalOptions := cfg.Global + limitations := make(map[string]int64) + if globalOptions != nil && globalOptions.Enabled && len(globalOptions.Actions) > 0 { + globalEnabled = globalOptions.Enabled + for action, limit := range globalOptions.Actions { + limitations[action] = limit + } + } + cb.Enabled = globalEnabled + + //buckets + for bucket, cbOptions := range cfg.Buckets { + if cbOptions.Enabled { + for action, limit := range cbOptions.Actions { + limitations[s3_constants.Concat(bucket, action)] = limit + } + } + } + + cb.limitations = limitations + return nil 
+} + +func (cb *CircuitBreaker) Limit(f func(w http.ResponseWriter, r *http.Request), action string) (http.HandlerFunc, Action) { + return func(w http.ResponseWriter, r *http.Request) { + if !cb.Enabled { + f(w, r) + return + } + + vars := mux.Vars(r) + bucket := vars["bucket"] + + rollback, errCode := cb.limit(r, bucket, action) + defer func() { + for _, rf := range rollback { + rf() + } + }() + + if errCode == s3err.ErrNone { + f(w, r) + return + } + s3err.WriteErrorResponse(w, r, errCode) + }, Action(action) +} + +func (cb *CircuitBreaker) limit(r *http.Request, bucket string, action string) (rollback []func(), errCode s3err.ErrorCode) { + + //bucket simultaneous request count + bucketCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest) + if bucketCountRollBack != nil { + rollback = append(rollback, bucketCountRollBack) + } + if errCode != s3err.ErrNone { + return + } + + //bucket simultaneous request content bytes + bucketContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(bucket, action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed) + if bucketContentLengthRollBack != nil { + rollback = append(rollback, bucketContentLengthRollBack) + } + if errCode != s3err.ErrNone { + return + } + + //global simultaneous request count + globalCountRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeCount), 1, s3err.ErrTooManyRequest) + if globalCountRollBack != nil { + rollback = append(rollback, globalCountRollBack) + } + if errCode != s3err.ErrNone { + return + } + + //global simultaneous request content bytes + globalContentLengthRollBack, errCode := cb.loadCounterAndCompare(s3_constants.Concat(action, s3_constants.LimitTypeBytes), r.ContentLength, s3err.ErrRequestBytesExceed) + if globalContentLengthRollBack != nil { + rollback = append(rollback, globalContentLengthRollBack) + } + if errCode != s3err.ErrNone { + return + } + return +} + +func (cb *CircuitBreaker) loadCounterAndCompare(key string, inc int64, errCode s3err.ErrorCode) (f func(), e s3err.ErrorCode) { + e = s3err.ErrNone + if max, ok := cb.limitations[key]; ok { + cb.RLock() + counter, exists := cb.counters[key] + cb.RUnlock() + + if !exists { + cb.Lock() + counter, exists = cb.counters[key] + if !exists { + var newCounter int64 + counter = &newCounter + cb.counters[key] = counter + } + cb.Unlock() + } + current := atomic.LoadInt64(counter) + if current+inc > max { + e = errCode + return + } else { + current := atomic.AddInt64(counter, inc) + f = func() { + atomic.AddInt64(counter, -inc) + } + if current > max { + e = errCode + return + } + } + } + return +} diff --git a/weed/s3api/s3api_circuit_breaker_test.go b/weed/s3api/s3api_circuit_breaker_test.go new file mode 100644 index 000000000..5848cf164 --- /dev/null +++ b/weed/s3api/s3api_circuit_breaker_test.go @@ -0,0 +1,107 @@ +package s3api + +import ( + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + "net/http" + "sync" + "sync/atomic" + "testing" +) + +type TestLimitCase struct { + actionName string + + limitType string + bucketLimitValue int64 + globalLimitValue int64 + + routineCount int + successCount int64 +} + +var ( + bucket = "/test" + action = s3_constants.ACTION_WRITE + fileSize int64 = 200 + + TestLimitCases = []*TestLimitCase{ + + //bucket-LimitTypeCount + {action, 
s3_constants.LimitTypeCount, 5, 6, 60, 5}, + {action, s3_constants.LimitTypeCount, 0, 6, 6, 0}, + + //global-LimitTypeCount + {action, s3_constants.LimitTypeCount, 6, 5, 6, 5}, + {action, s3_constants.LimitTypeCount, 6, 0, 6, 0}, + + //bucket-LimitTypeBytes + {action, s3_constants.LimitTypeBytes, 1000, 1020, 6, 5}, + {action, s3_constants.LimitTypeBytes, 0, 1020, 6, 0}, + + //global-LimitTypeBytes + {action, s3_constants.LimitTypeBytes, 1020, 1000, 6, 5}, + {action, s3_constants.LimitTypeBytes, 1020, 0, 6, 0}, + } +) + +func TestLimit(t *testing.T) { + for _, tc := range TestLimitCases { + circuitBreakerConfig := &s3_pb.S3CircuitBreakerConfig{ + Global: &s3_pb.S3CircuitBreakerOptions{ + Enabled: true, + Actions: map[string]int64{ + s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue, + s3_constants.Concat(tc.actionName, tc.limitType): tc.globalLimitValue, + }, + }, + Buckets: map[string]*s3_pb.S3CircuitBreakerOptions{ + bucket: { + Enabled: true, + Actions: map[string]int64{ + s3_constants.Concat(tc.actionName, tc.limitType): tc.bucketLimitValue, + }, + }, + }, + } + circuitBreaker := &CircuitBreaker{ + counters: make(map[string]*int64), + limitations: make(map[string]int64), + } + err := circuitBreaker.loadCircuitBreakerConfig(circuitBreakerConfig) + if err != nil { + t.Fatal(err) + } + + successCount := doLimit(circuitBreaker, tc.routineCount, &http.Request{ContentLength: fileSize}, tc.actionName) + if successCount != tc.successCount { + t.Errorf("successCount not equal, expect=%d, actual=%d, case: %v", tc.successCount, successCount, tc) + } + } +} + +func doLimit(circuitBreaker *CircuitBreaker, routineCount int, r *http.Request, action string) int64 { + var successCounter int64 + resultCh := make(chan []func(), routineCount) + var wg sync.WaitGroup + for i := 0; i < routineCount; i++ { + wg.Add(1) + go func() { + defer wg.Done() + rollbackFn, errCode := circuitBreaker.limit(r, bucket, action) + if errCode == s3err.ErrNone { + atomic.AddInt64(&successCounter, 1) + } + resultCh <- rollbackFn + }() + } + wg.Wait() + close(resultCh) + for fns := range resultCh { + for _, fn := range fns { + fn() + } + } + return successCounter +} diff --git a/weed/s3api/s3api_object_multipart_handlers.go b/weed/s3api/s3api_object_multipart_handlers.go index e650c9156..768f4d180 100644 --- a/weed/s3api/s3api_object_multipart_handlers.go +++ b/weed/s3api/s3api_object_multipart_handlers.go @@ -4,16 +4,17 @@ import ( "crypto/sha1" "encoding/xml" "fmt" - "github.com/chrislusf/seaweedfs/weed/glog" - "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" - "github.com/chrislusf/seaweedfs/weed/s3api/s3err" - weed_server "github.com/chrislusf/seaweedfs/weed/server" "io" "net/http" "net/url" "strconv" "strings" + "github.com/chrislusf/seaweedfs/weed/glog" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "github.com/chrislusf/seaweedfs/weed/s3api/s3err" + weed_server "github.com/chrislusf/seaweedfs/weed/server" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" ) @@ -119,7 +120,9 @@ func (s3a *S3ApiServer) AbortMultipartUploadHandler(w http.ResponseWriter, r *ht glog.V(2).Info("AbortMultipartUploadHandler", string(s3err.EncodeXMLResponse(response))) - writeSuccessResponseXML(w, r, response) + //https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html + s3err.WriteXMLResponse(w, r, http.StatusNoContent, response) + s3err.PostLog(r, http.StatusNoContent, s3err.ErrNone) } diff --git a/weed/s3api/s3api_server.go b/weed/s3api/s3api_server.go index 
657fa8171..cc5ca5231 100644 --- a/weed/s3api/s3api_server.go +++ b/weed/s3api/s3api_server.go @@ -3,13 +3,13 @@ package s3api import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" "net" "net/http" "strings" "time" - "github.com/chrislusf/seaweedfs/weed/filer" "github.com/chrislusf/seaweedfs/weed/pb" . "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" "github.com/chrislusf/seaweedfs/weed/s3api/s3err" @@ -35,6 +35,7 @@ type S3ApiServer struct { s3_pb.UnimplementedSeaweedS3Server option *S3ApiServerOption iam *IdentityAccessManagement + cb *CircuitBreaker randomClientId int32 filerGuard *security.Guard client *http.Client @@ -55,6 +56,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer iam: NewIdentityAccessManagement(option), randomClientId: util.RandomInt32(), filerGuard: security.NewGuard([]string{}, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec), + cb: NewCircuitBreaker(option), } if option.LocalFilerSocket == nil || *option.LocalFilerSocket == "" { s3ApiServer.client = &http.Client{Transport: &http.Transport{ @@ -73,7 +75,7 @@ func NewS3ApiServer(router *mux.Router, option *S3ApiServerOption) (s3ApiServer s3ApiServer.registerRouter(router) - go s3ApiServer.subscribeMetaEvents("s3", filer.IamConfigDirecotry+"/"+filer.IamIdentityFile, time.Now().UnixNano()) + go s3ApiServer.subscribeMetaEvents("s3", filer.DirectoryEtcRoot, time.Now().UnixNano()) return s3ApiServer, nil } @@ -107,115 +109,115 @@ func (s3a *S3ApiServer) registerRouter(router *mux.Router) { // objects with query // CopyObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", `.*?(\/|%2F).*?`).HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // PutObjectPart - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectPartHandler, ACTION_WRITE), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectPartHandler, ACTION_WRITE)), "PUT")).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // CompleteMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.CompleteMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CompleteMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploadId", "{uploadId:.*}") // NewMultipartUpload - bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.NewMultipartUploadHandler, ACTION_WRITE), "POST")).Queries("uploads", "") + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.NewMultipartUploadHandler, ACTION_WRITE)), "POST")).Queries("uploads", "") // AbortMultipartUpload - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.AbortMultipartUploadHandler, ACTION_WRITE), "DELETE")).Queries("uploadId", "{uploadId:.*}") + 
bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.AbortMultipartUploadHandler, ACTION_WRITE)), "DELETE")).Queries("uploadId", "{uploadId:.*}") // ListObjectParts - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectPartsHandler, ACTION_READ), "GET")).Queries("uploadId", "{uploadId:.*}") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectPartsHandler, ACTION_READ)), "GET")).Queries("uploadId", "{uploadId:.*}") // ListMultipartUploads - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListMultipartUploadsHandler, ACTION_READ), "GET")).Queries("uploads", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListMultipartUploadsHandler, ACTION_READ)), "GET")).Queries("uploads", "") // GetObjectTagging - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectTaggingHandler, ACTION_READ), "GET")).Queries("tagging", "") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectTaggingHandler, ACTION_READ)), "GET")).Queries("tagging", "") // PutObjectTagging - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectTaggingHandler, ACTION_TAGGING), "PUT")).Queries("tagging", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectTaggingHandler, ACTION_TAGGING)), "PUT")).Queries("tagging", "") // DeleteObjectTagging - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING), "DELETE")).Queries("tagging", "") + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectTaggingHandler, ACTION_TAGGING)), "DELETE")).Queries("tagging", "") // PutObjectACL - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "") // PutObjectRetention - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectRetentionHandler, ACTION_WRITE), "PUT")).Queries("retention", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectRetentionHandler, ACTION_WRITE)), "PUT")).Queries("retention", "") // PutObjectLegalHold - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLegalHoldHandler, ACTION_WRITE), "PUT")).Queries("legal-hold", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLegalHoldHandler, ACTION_WRITE)), "PUT")).Queries("legal-hold", "") // PutObjectLockConfiguration - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE), "PUT")).Queries("object-lock", "") + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectLockConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("object-lock", "") // GetObjectACL - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectAclHandler, ACTION_READ), "GET")).Queries("acl", "") + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectAclHandler, 
ACTION_READ)), "GET")).Queries("acl", "") // objects with query // raw objects // HeadObject - bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.HeadObjectHandler, ACTION_READ), "GET")) + bucket.Methods("HEAD").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadObjectHandler, ACTION_READ)), "GET")) // GetObject, but directory listing is not supported - bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.GetObjectHandler, ACTION_READ), "GET")) + bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetObjectHandler, ACTION_READ)), "GET")) // CopyObject - bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.CopyObjectHandler, ACTION_WRITE), "COPY")) + bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.CopyObjectHandler, ACTION_WRITE)), "COPY")) // PutObject - bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.PutObjectHandler, ACTION_WRITE), "PUT")) + bucket.Methods("PUT").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutObjectHandler, ACTION_WRITE)), "PUT")) // DeleteObject - bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteObjectHandler, ACTION_WRITE), "DELETE")) + bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteObjectHandler, ACTION_WRITE)), "DELETE")) // raw objects // buckets with query // DeleteMultipleObjects - bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE), "DELETE")).Queries("delete", "") + bucket.Methods("POST").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteMultipleObjectsHandler, ACTION_WRITE)), "DELETE")).Queries("delete", "") // GetBucketACL - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketAclHandler, ACTION_READ), "GET")).Queries("acl", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketAclHandler, ACTION_READ)), "GET")).Queries("acl", "") // PutBucketACL - bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketAclHandler, ACTION_WRITE), "PUT")).Queries("acl", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketAclHandler, ACTION_WRITE)), "PUT")).Queries("acl", "") // GetBucketPolicy - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketPolicyHandler, ACTION_READ), "GET")).Queries("policy", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketPolicyHandler, ACTION_READ)), "GET")).Queries("policy", "") // PutBucketPolicy - bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketPolicyHandler, ACTION_WRITE), "PUT")).Queries("policy", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketPolicyHandler, ACTION_WRITE)), "PUT")).Queries("policy", "") // DeleteBucketPolicy - bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketPolicyHandler, ACTION_WRITE), "DELETE")).Queries("policy", "") + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketPolicyHandler, ACTION_WRITE)), "DELETE")).Queries("policy", "") // GetBucketCors - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketCorsHandler, ACTION_READ), "GET")).Queries("cors", "") + 
bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketCorsHandler, ACTION_READ)), "GET")).Queries("cors", "") // PutBucketCors - bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketCorsHandler, ACTION_WRITE), "PUT")).Queries("cors", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketCorsHandler, ACTION_WRITE)), "PUT")).Queries("cors", "") // DeleteBucketCors - bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketCorsHandler, ACTION_WRITE), "DELETE")).Queries("cors", "") + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketCorsHandler, ACTION_WRITE)), "DELETE")).Queries("cors", "") // GetBucketLifecycleConfiguration - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ), "GET")).Queries("lifecycle", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLifecycleConfigurationHandler, ACTION_READ)), "GET")).Queries("lifecycle", "") // PutBucketLifecycleConfiguration - bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE), "PUT")).Queries("lifecycle", "") + bucket.Methods("PUT").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PutBucketLifecycleConfigurationHandler, ACTION_WRITE)), "PUT")).Queries("lifecycle", "") // DeleteBucketLifecycleConfiguration - bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE), "DELETE")).Queries("lifecycle", "") + bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketLifecycleHandler, ACTION_WRITE)), "DELETE")).Queries("lifecycle", "") // GetBucketLocation - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketLocationHandler, ACTION_READ), "GET")).Queries("location", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketLocationHandler, ACTION_READ)), "GET")).Queries("location", "") // GetBucketRequestPayment - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.GetBucketRequestPaymentHandler, ACTION_READ), "GET")).Queries("requestPayment", "") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.GetBucketRequestPaymentHandler, ACTION_READ)), "GET")).Queries("requestPayment", "") // ListObjectsV2 - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV2Handler, ACTION_LIST), "LIST")).Queries("list-type", "2") + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV2Handler, ACTION_LIST)), "LIST")).Queries("list-type", "2") // buckets with query // raw buckets // PostPolicy - bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.PostPolicyBucketHandler, ACTION_WRITE), "POST")) + bucket.Methods("POST").HeadersRegexp("Content-Type", "multipart/form-data*").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.PostPolicyBucketHandler, ACTION_WRITE)), "POST")) // HeadBucket - bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.HeadBucketHandler, ACTION_READ), "GET")) + bucket.Methods("HEAD").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.HeadBucketHandler, ACTION_READ)), "GET")) // PutBucket bucket.Methods("PUT").HandlerFunc(track(s3a.PutBucketHandler, "PUT")) // DeleteBucket - bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.DeleteBucketHandler, ACTION_WRITE), "DELETE")) + 
bucket.Methods("DELETE").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.DeleteBucketHandler, ACTION_WRITE)), "DELETE")) // ListObjectsV1 (Legacy) - bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.ListObjectsV1Handler, ACTION_LIST), "LIST")) + bucket.Methods("GET").HandlerFunc(track(s3a.iam.Auth(s3a.cb.Limit(s3a.ListObjectsV1Handler, ACTION_LIST)), "LIST")) // raw buckets diff --git a/weed/s3api/s3err/s3api_errors.go b/weed/s3api/s3err/s3api_errors.go index 2e93f49cb..57f269a2e 100644 --- a/weed/s3api/s3err/s3api_errors.go +++ b/weed/s3api/s3err/s3api_errors.go @@ -104,6 +104,9 @@ const ( ErrExistingObjectIsDirectory ErrExistingObjectIsFile + + ErrTooManyRequest + ErrRequestBytesExceed ) // error code to APIError structure, these fields carry respective @@ -401,6 +404,16 @@ var errorCodeResponse = map[ErrorCode]APIError{ Description: "Existing Object is a file.", HTTPStatusCode: http.StatusConflict, }, + ErrTooManyRequest: { + Code: "ErrTooManyRequest", + Description: "Too many simultaneous request count", + HTTPStatusCode: http.StatusTooManyRequests, + }, + ErrRequestBytesExceed: { + Code: "ErrRequestBytesExceed", + Description: "Simultaneous request bytes exceed limitations", + HTTPStatusCode: http.StatusTooManyRequests, + }, } // GetAPIError provides API Error for input API error code. diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go index e3ec5b724..b4bc850e2 100644 --- a/weed/server/volume_grpc_copy.go +++ b/weed/server/volume_grpc_copy.go @@ -3,6 +3,8 @@ package weed_server import ( "context" "fmt" + "github.com/chrislusf/seaweedfs/weed/pb/master_pb" + "github.com/chrislusf/seaweedfs/weed/storage/backend" "io" "math" "os" @@ -78,6 +80,28 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre } }() + var preallocateSize int64 + if grpcErr := pb.WithMasterClient(false, vs.GetMaster(), vs.grpcDialOption, func(client master_pb.SeaweedClient) error { + resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{}) + if err != nil { + return fmt.Errorf("get master %s configuration: %v", vs.GetMaster(), err) + } + if resp.VolumePreallocate { + preallocateSize = int64(resp.VolumeSizeLimitMB) * (1 << 20) + } + return nil + }); grpcErr != nil { + glog.V(0).Infof("connect to %s: %v", vs.GetMaster(), grpcErr) + } + + if preallocateSize > 0 { + volumeFile := dataBaseFileName + ".dat" + _, err := backend.CreateVolumeFile(volumeFile, preallocateSize, 0) + if err != nil { + return fmt.Errorf("create volume file %s: %v", volumeFile, err) + } + } + // println("source:", volFileInfoResp.String()) copyResponse := &volume_server_pb.VolumeCopyResponse{} reportInterval := int64(1024 * 1024 * 128) diff --git a/weed/shell/command_s3_circuitbreaker.go b/weed/shell/command_s3_circuitbreaker.go new file mode 100644 index 000000000..7e11153bf --- /dev/null +++ b/weed/shell/command_s3_circuitbreaker.go @@ -0,0 +1,358 @@ +package shell + +import ( + "bytes" + "flag" + "fmt" + "github.com/chrislusf/seaweedfs/weed/filer" + "github.com/chrislusf/seaweedfs/weed/pb/s3_pb" + "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants" + "io" + "strconv" + "strings" + + "github.com/chrislusf/seaweedfs/weed/pb/filer_pb" +) + +var LoadConfig = loadConfig + +func init() { + Commands = append(Commands, &commandS3CircuitBreaker{}) +} + +type commandS3CircuitBreaker struct { +} + +func (c *commandS3CircuitBreaker) Name() string { + return "s3.circuitBreaker" +} + +func (c *commandS3CircuitBreaker) Help() string { + 
diff --git a/weed/server/volume_grpc_copy.go b/weed/server/volume_grpc_copy.go
index e3ec5b724..b4bc850e2 100644
--- a/weed/server/volume_grpc_copy.go
+++ b/weed/server/volume_grpc_copy.go
@@ -3,6 +3,8 @@ package weed_server
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+	"github.com/chrislusf/seaweedfs/weed/storage/backend"
 	"io"
 	"math"
 	"os"
@@ -78,6 +80,28 @@ func (vs *VolumeServer) VolumeCopy(req *volume_server_pb.VolumeCopyRequest, stre
 		}
 	}()
 
+	var preallocateSize int64
+	if grpcErr := pb.WithMasterClient(false, vs.GetMaster(), vs.grpcDialOption, func(client master_pb.SeaweedClient) error {
+		resp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
+		if err != nil {
+			return fmt.Errorf("get master %s configuration: %v", vs.GetMaster(), err)
+		}
+		if resp.VolumePreallocate {
+			preallocateSize = int64(resp.VolumeSizeLimitMB) * (1 << 20)
+		}
+		return nil
+	}); grpcErr != nil {
+		glog.V(0).Infof("connect to %s: %v", vs.GetMaster(), grpcErr)
+	}
+
+	if preallocateSize > 0 {
+		volumeFile := dataBaseFileName + ".dat"
+		_, err := backend.CreateVolumeFile(volumeFile, preallocateSize, 0)
+		if err != nil {
+			return fmt.Errorf("create volume file %s: %v", volumeFile, err)
+		}
+	}
+
 	// println("source:", volFileInfoResp.String())
 	copyResponse := &volume_server_pb.VolumeCopyResponse{}
 	reportInterval := int64(1024 * 1024 * 128)
diff --git a/weed/shell/command_s3_circuitbreaker.go b/weed/shell/command_s3_circuitbreaker.go
new file mode 100644
index 000000000..7e11153bf
--- /dev/null
+++ b/weed/shell/command_s3_circuitbreaker.go
@@ -0,0 +1,358 @@
+package shell
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+var LoadConfig = loadConfig
+
+func init() {
+	Commands = append(Commands, &commandS3CircuitBreaker{})
+}
+
+type commandS3CircuitBreaker struct {
+}
+
+func (c *commandS3CircuitBreaker) Name() string {
+	return "s3.circuitBreaker"
+}
+
+func (c *commandS3CircuitBreaker) Help() string {
+	return `configure and apply s3 circuit breaker options for each bucket
+
+	# examples
+	# add circuit breaker config for global
+	s3.circuitBreaker -global -type Count -actions Read,Write -values 500,200 -apply
+
+	# disable global config
+	s3.circuitBreaker -global -disable -apply
+
+	# add circuit breaker config for buckets x,y,z
+	s3.circuitBreaker -buckets x,y,z -type Count -actions Read,Write -values 200,100 -apply
+
+	# disable circuit breaker config of x
+	s3.circuitBreaker -buckets x -disable -apply
+
+	# delete circuit breaker config of x
+	s3.circuitBreaker -buckets x -delete -apply
+
+	# clear all circuit breaker config
+	s3.circuitBreaker -delete -apply
+	`
+}
+
+func (c *commandS3CircuitBreaker) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
+	dir := s3_constants.CircuitBreakerConfigDir
+	file := s3_constants.CircuitBreakerConfigFile
+
+	s3CircuitBreakerCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+	buckets := s3CircuitBreakerCommand.String("buckets", "", "the bucket name(s) to configure, eg: -buckets x,y,z")
+	global := s3CircuitBreakerCommand.Bool("global", false, "configure global circuit breaker")
+
+	actions := s3CircuitBreakerCommand.String("actions", "", "comma separated action names: Read,Write,List,Tagging,Admin")
+	limitType := s3CircuitBreakerCommand.String("type", "", "'Count' or 'MB'; Count represents the number of simultaneous requests, and MB represents the content size of all simultaneous requests")
+	values := s3CircuitBreakerCommand.String("values", "", "comma separated values")
+
+	disabled := s3CircuitBreakerCommand.Bool("disable", false, "disable global or buckets circuit breaker")
+	deleted := s3CircuitBreakerCommand.Bool("delete", false, "delete circuit breaker config")
+
+	apply := s3CircuitBreakerCommand.Bool("apply", false, "update and apply current configuration")
+
+	if err = s3CircuitBreakerCommand.Parse(args); err != nil {
+		return nil
+	}
+
+	var buf bytes.Buffer
+	err = LoadConfig(commandEnv, dir, file, &buf)
+	if err != nil {
+		return err
+	}
+
+	cbCfg := &s3_pb.S3CircuitBreakerConfig{
+		Buckets: make(map[string]*s3_pb.S3CircuitBreakerOptions),
+	}
+	if buf.Len() > 0 {
+		if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), cbCfg); err != nil {
+			return err
+		}
+	}
+
+	if *deleted {
+		cmdBuckets, cmdActions, _, err := c.initActionsAndValues(buckets, actions, limitType, values, true)
+		if err != nil {
+			return err
+		}
+
+		if len(cmdBuckets) <= 0 && !*global {
+			if len(cmdActions) > 0 {
+				deleteGlobalActions(cbCfg, cmdActions, limitType)
+				if cbCfg.Buckets != nil {
+					var allBuckets []string
+					for bucket := range cbCfg.Buckets {
+						allBuckets = append(allBuckets, bucket)
+					}
+					deleteBucketsActions(allBuckets, cbCfg, cmdActions, limitType)
+				}
+			} else {
+				cbCfg.Global = nil
+				cbCfg.Buckets = nil
+			}
+		} else {
+			if len(cmdBuckets) > 0 {
+				deleteBucketsActions(cmdBuckets, cbCfg, cmdActions, limitType)
+			}
+			if *global {
+				deleteGlobalActions(cbCfg, cmdActions, limitType)
+			}
+		}
+	} else {
+		cmdBuckets, cmdActions, cmdValues, err := c.initActionsAndValues(buckets, actions, limitType, values, *disabled)
+		if err != nil {
+			return err
+		}
+
+		if len(cmdActions) > 0 && len(*buckets) <= 0 && !*global {
+			return fmt.Errorf("one of -global and -buckets must be specified")
+		}
+
+		if len(*buckets) > 0 {
+			for _, bucket := range cmdBuckets {
+				var cbOptions *s3_pb.S3CircuitBreakerOptions
+				var exists bool
+				if cbOptions, exists = cbCfg.Buckets[bucket]; !exists {
+					cbOptions = &s3_pb.S3CircuitBreakerOptions{}
+					cbCfg.Buckets[bucket] = cbOptions
+				}
+				cbOptions.Enabled = !*disabled
+
+				if len(cmdActions) > 0 {
+					err = insertOrUpdateValues(cbOptions, cmdActions, cmdValues, limitType)
+					if err != nil {
+						return err
+					}
+				}
+
+				if len(cbOptions.Actions) <= 0 && !cbOptions.Enabled {
+					delete(cbCfg.Buckets, bucket)
+				}
+			}
+		}
+
+		if *global {
+			globalOptions := cbCfg.Global
+			if globalOptions == nil {
+				globalOptions = &s3_pb.S3CircuitBreakerOptions{Actions: make(map[string]int64, len(cmdActions))}
+				cbCfg.Global = globalOptions
+			}
+			globalOptions.Enabled = !*disabled
+
+			if len(cmdActions) > 0 {
+				err = insertOrUpdateValues(globalOptions, cmdActions, cmdValues, limitType)
+				if err != nil {
+					return err
+				}
+			}
+
+			if len(globalOptions.Actions) <= 0 && !globalOptions.Enabled {
+				cbCfg.Global = nil
+			}
+		}
+	}
+
+	buf.Reset()
+	err = filer.ProtoToText(&buf, cbCfg)
+	if err != nil {
+		return err
+	}
+
+	_, _ = fmt.Fprint(writer, buf.String())
+	_, _ = fmt.Fprintln(writer)
+
+	if *apply {
+		if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+			return filer.SaveInsideFiler(client, dir, file, buf.Bytes())
+		}); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func loadConfig(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error {
+	if err := commandEnv.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
+		return filer.ReadEntry(commandEnv.MasterClient, client, dir, file, buf)
+	}); err != nil && err != filer_pb.ErrNotFound {
+		return err
+	}
+	return nil
+}
+
+func insertOrUpdateValues(cbOptions *s3_pb.S3CircuitBreakerOptions, cmdActions []string, cmdValues []int64, limitType *string) error {
+	if len(*limitType) == 0 {
+		return fmt.Errorf("type not valid, only 'Count' and 'MB' are allowed")
+	}
+
+	if cbOptions.Actions == nil {
+		cbOptions.Actions = make(map[string]int64, len(cmdActions))
+	}
+
+	if len(cmdValues) > 0 {
+		for i, action := range cmdActions {
+			cbOptions.Actions[s3_constants.Concat(action, *limitType)] = cmdValues[i]
+		}
+	}
+	return nil
+}
+
+func deleteBucketsActions(cmdBuckets []string, cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []string, limitType *string) {
+	if cbCfg.Buckets == nil {
+		return
+	}
+
+	if len(cmdActions) == 0 {
+		for _, bucket := range cmdBuckets {
+			delete(cbCfg.Buckets, bucket)
+		}
+	} else {
+		for _, bucket := range cmdBuckets {
+			if cbOption, ok := cbCfg.Buckets[bucket]; ok {
+				if cbOption.Actions != nil {
+					for _, action := range cmdActions {
+						delete(cbOption.Actions, s3_constants.Concat(action, *limitType))
+					}
+				}
+
+				if len(cbOption.Actions) == 0 && !cbOption.Enabled {
+					delete(cbCfg.Buckets, bucket)
+				}
+			}
+		}
+	}
+
+	if len(cbCfg.Buckets) == 0 {
+		cbCfg.Buckets = nil
+	}
+}
+
+func deleteGlobalActions(cbCfg *s3_pb.S3CircuitBreakerConfig, cmdActions []string, limitType *string) {
+	globalOptions := cbCfg.Global
+	if globalOptions == nil {
+		return
+	}
+
+	if len(cmdActions) == 0 && globalOptions.Actions != nil {
+		globalOptions.Actions = nil
+		return
+	}
+
+	for _, action := range cmdActions {
+		delete(globalOptions.Actions, s3_constants.Concat(action, *limitType))
+	}
+
+	if len(globalOptions.Actions) == 0 && !globalOptions.Enabled {
+		cbCfg.Global = nil
+	}
+}
+
+func (c *commandS3CircuitBreaker) initActionsAndValues(buckets, actions, limitType, values *string, parseValues bool) (cmdBuckets, cmdActions []string, cmdValues []int64, err error) {
+	if len(*buckets) > 0 {
+		cmdBuckets = strings.Split(*buckets, ",")
+	}
+
+	if len(*actions) > 0 {
+		cmdActions = strings.Split(*actions, ",")
+
+		// check that every requested action is valid
+		for _, action := range cmdActions {
+			var found bool
+			for _, allowedAction := range s3_constants.AllowedActions {
+				if allowedAction == action {
+					found = true
+				}
+			}
+			if !found {
+				return nil, nil, nil, fmt.Errorf("value(%s) of flag[-actions] not valid, allowed actions: %v", *actions, s3_constants.AllowedActions)
+			}
+		}
+	}
+
+	if !parseValues {
+		if len(cmdActions) == 0 {
+			for _, action := range s3_constants.AllowedActions {
+				cmdActions = append(cmdActions, action)
+			}
+		}
+
+		if len(*limitType) > 0 {
+			switch *limitType {
+			case s3_constants.LimitTypeCount:
+				elements := strings.Split(*values, ",")
+				if len(cmdActions) != len(elements) {
+					if len(elements) != 1 {
+						return nil, nil, nil, fmt.Errorf("count of flag[-actions] and flag[-values] not equal")
+					}
+					v, err := strconv.Atoi(elements[0])
+					if err != nil {
+						return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)")
+					}
+					for range cmdActions {
+						cmdValues = append(cmdValues, int64(v))
+					}
+				} else {
+					for _, value := range elements {
+						v, err := strconv.Atoi(value)
+						if err != nil {
+							return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)")
+						}
+						cmdValues = append(cmdValues, int64(v))
+					}
+				}
+			case s3_constants.LimitTypeBytes:
+				elements := strings.Split(*values, ",")
+				if len(cmdActions) != len(elements) {
+					if len(elements) != 1 {
+						return nil, nil, nil, fmt.Errorf("count of flag[-actions] and flag[-values] not equal")
+					}
+					v, err := parseMBToBytes(elements[0])
+					if err != nil {
+						return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)")
+					}
+					for range cmdActions {
+						cmdValues = append(cmdValues, v)
+					}
+				} else {
+					for _, value := range elements {
+						v, err := parseMBToBytes(value)
+						if err != nil {
+							return nil, nil, nil, fmt.Errorf("value of -values must be a legal number(s)")
+						}
+						cmdValues = append(cmdValues, v)
+					}
+				}
+			default:
+				return nil, nil, nil, fmt.Errorf("type not valid, only 'Count' and 'MB' are allowed")
+			}
+		} else {
+			*limitType = ""
+		}
+	}
+	return cmdBuckets, cmdActions, cmdValues, nil
+}
+
+func parseMBToBytes(valStr string) (int64, error) {
+	v, err := strconv.Atoi(valStr)
+	if err != nil {
+		return 0, err
+	}
+	return int64(v) * 1024 * 1024, nil
+}
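With -apply, the command stores the rendered configuration on the filer under s3_constants.CircuitBreakerConfigDir/CircuitBreakerConfigFile. The sketch below shows how those stored bytes can be decoded back into the protobuf config using the helpers referenced above; the function name and error handling are assumptions for illustration only.

// Sketch only: decoding the stored circuit-breaker configuration with the
// filer helper and protobuf type used by the command above.
package example

import (
	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/s3_pb"
)

func decodeCircuitBreakerConfig(content []byte) (*s3_pb.S3CircuitBreakerConfig, error) {
	cfg := &s3_pb.S3CircuitBreakerConfig{}
	if len(content) == 0 {
		return cfg, nil // nothing configured yet
	}
	if err := filer.ParseS3ConfigurationFromBytes(content, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}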
} + }, + "y": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + }, + "z": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + } + } + }`, + }, + + //disable circuit breaker config of x + { + args: strings.Split("-buckets x -disable", " "), + result: `{ + "global": { + "actions": { + "Read:Count": "500", + "Write:Count": "200" + } + }, + "buckets": { + "x": { + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + }, + "y": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + }, + "z": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + } + } + }`, + }, + + //delete circuit breaker config of x + { + args: strings.Split("-buckets x -delete", " "), + result: `{ + "global": { + "actions": { + "Read:Count": "500", + "Write:Count": "200" + } + }, + "buckets": { + "y": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + }, + "z": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + } + } + }`, + }, + + //configure the circuit breaker for the size of the uploaded file for bucket x,y + { + args: strings.Split("-buckets x,y -type MB -actions Write -values 1024", " "), + result: `{ + "global": { + "actions": { + "Read:Count": "500", + "Write:Count": "200" + } + }, + "buckets": { + "x": { + "enabled": true, + "actions": { + "Write:MB": "1073741824" + } + }, + "y": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100", + "Write:MB": "1073741824" + } + }, + "z": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + } + } + }`, + }, + + //delete the circuit breaker configuration for the size of the uploaded file of bucket x,y + { + args: strings.Split("-buckets x,y -type MB -actions Write -delete", " "), + result: `{ + "global": { + "actions": { + "Read:Count": "500", + "Write:Count": "200" + } + }, + "buckets": { + "x": { + "enabled": true + }, + "y": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + }, + "z": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + } + } + }`, + }, + + //enable global circuit breaker config (without -disable flag) + { + args: strings.Split("-global", " "), + result: `{ + "global": { + "enabled": true, + "actions": { + "Read:Count": "500", + "Write:Count": "200" + } + }, + "buckets": { + "x": { + "enabled": true + }, + "y": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + }, + "z": { + "enabled": true, + "actions": { + "Read:Count": "200", + "Write:Count": "100" + } + } + } + }`, + }, + + //clear all circuit breaker config + { + args: strings.Split("-delete", " "), + result: `{ + + }`, + }, + } +) + +func TestCircuitBreakerShell(t *testing.T) { + var writeBuf bytes.Buffer + cmd := &commandS3CircuitBreaker{} + LoadConfig = func(commandEnv *CommandEnv, dir string, file string, buf *bytes.Buffer) error { + _, err := buf.Write(writeBuf.Bytes()) + if err != nil { + return err + } + writeBuf.Reset() + return nil + } + + for i, tc := range TestCases { + err := cmd.Do(tc.args, nil, &writeBuf) + if err != nil { + t.Fatal(err) + } + if i != 0 { + result := writeBuf.String() + + actual := make(map[string]interface{}) + err := json.Unmarshal([]byte(result), &actual) + if err != nil { + t.Error(err) + } + + expect := make(map[string]interface{}) + err = json.Unmarshal([]byte(result), &expect) 
+			if err != nil {
+				t.Error(err)
+			}
+			if !reflect.DeepEqual(actual, expect) {
+				t.Fatal("result of s3 circuit breaker shell command is unexpected!")
+			}
+		}
+	}
+}