
[s3] add s3 pass test_multipart_upload_size_too_small (#5475)

* add s3 pass test_multipart_upload_size_too_small

* refactor metric names

* return ErrNoSuchUpload if empty parts

* fix test
Konstantin Lebedev authored 2024-04-07 23:52:35 +05:00 (committed by GitHub)
parent 35cba720a5
commit 3e25ed1b11
3 changed files with 26 additions and 9 deletions
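
For context: Amazon S3 rejects CompleteMultipartUpload when any part other than the last is smaller than 5 MiB, returning EntityTooSmall, and this commit makes SeaweedFS mirror that behavior (plus return NoSuchUpload when the part list is empty). A minimal sketch of how a client would now observe the error, using the AWS SDK for Go v1; the endpoint, credentials, and bucket/key names are illustrative, not from the commit:

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Point the client at a SeaweedFS S3 gateway (endpoint is an assumption).
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("any", "any", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))
	svc := s3.New(sess)

	bucket, key := aws.String("test-bucket"), aws.String("obj")
	create, err := svc.CreateMultipartUpload(&s3.CreateMultipartUploadInput{Bucket: bucket, Key: key})
	if err != nil {
		log.Fatal(err)
	}

	// Upload two 1 MiB parts: part 1 is not the final part and is below the
	// 5 MiB minimum, so completing the upload should fail.
	var parts []*s3.CompletedPart
	for n := int64(1); n <= 2; n++ {
		up, err := svc.UploadPart(&s3.UploadPartInput{
			Bucket:     bucket,
			Key:        key,
			UploadId:   create.UploadId,
			PartNumber: aws.Int64(n),
			Body:       bytes.NewReader(make([]byte, 1<<20)),
		})
		if err != nil {
			log.Fatal(err)
		}
		parts = append(parts, &s3.CompletedPart{ETag: up.ETag, PartNumber: aws.Int64(n)})
	}

	_, err = svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
		Bucket:          bucket,
		Key:             key,
		UploadId:        create.UploadId,
		MultipartUpload: &s3.CompletedMultipartUpload{Parts: parts},
	})
	fmt.Println(err) // expected: EntityTooSmall
}
```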


@@ -169,6 +169,7 @@ jobs:
           s3tests_boto3/functional/test_s3.py::test_multipart_upload \
           s3tests_boto3/functional/test_s3.py::test_multipart_upload_contents \
           s3tests_boto3/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \
+          s3tests_boto3/functional/test_s3.py::test_multipart_upload_size_too_small \
           s3tests_boto3/functional/test_s3.py::test_abort_multipart_upload \
           s3tests_boto3/functional/test_s3.py::test_list_multipart_upload \
           s3tests_boto3/functional/test_s3.py::test_atomic_read_1mb \
@@ -181,7 +182,6 @@ jobs:
           s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_4mb \
           s3tests_boto3/functional/test_s3.py::test_atomic_dual_write_8mb \
           s3tests_boto3/functional/test_s3.py::test_atomic_multipart_upload_write \
-          s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \
           s3tests_boto3/functional/test_s3.py::test_ranged_request_response_code \
           s3tests_boto3/functional/test_s3.py::test_ranged_big_request_response_code \
           s3tests_boto3/functional/test_s3.py::test_ranged_request_skip_leading_bytes_response_code \


@@ -25,7 +25,10 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 )

-const multipartExt = ".part"
+const (
+	multipartExt     = ".part"
+	multiPartMinSize = 5 * 1024 * 1024
+)

 type InitiateMultipartUploadResult struct {
 	XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult"`
@@ -75,6 +78,10 @@ type CompleteMultipartUploadResult struct {
 func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploadInput, parts *CompleteMultipartUpload) (output *CompleteMultipartUploadResult, code s3err.ErrorCode) {
 	glog.V(2).Infof("completeMultipartUpload input %v", input)
+	if len(parts.Parts) == 0 {
+		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompletedNoSuchUpload).Inc()
+		return nil, s3err.ErrNoSuchUpload
+	}

 	completedPartNumbers := []int{}
 	completedPartMap := make(map[int][]string)
 	for _, part := range parts.Parts {
@@ -83,8 +90,9 @@ func (s3a *S3ApiServer) completeMultipartUploa
 		}
 		completedPartMap[part.PartNumber] = append(completedPartMap[part.PartNumber], part.ETag)
 	}
-	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
+	sort.Ints(completedPartNumbers)

+	uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId
 	entries, _, err := s3a.list(uploadDirectory, "", "", false, maxPartsList)
 	if err != nil {
 		glog.Errorf("completeMultipartUpload %s %s error: %v, entries:%d", *input.Bucket, *input.UploadId, err, len(entries))
@@ -118,6 +126,7 @@ func (s3a *S3ApiServer) completeMultipartUploa
 	}
 	deleteEntries := []*filer_pb.Entry{}
 	partEntries := make(map[int][]*filer_pb.Entry, len(entries))
+	entityTooSmall := false
 	for _, entry := range entries {
 		foundEntry := false
 		glog.V(4).Infof("completeMultipartUpload part entries %s", entry.Name)
@@ -156,16 +165,23 @@ func (s3a *S3ApiServer) completeMultipartUploa
 			partEntries[partNumber] = append(partEntries[partNumber], entry)
 			foundEntry = true
 		}
-		if !foundEntry {
+		if foundEntry {
+			if len(completedPartNumbers) > 1 && partNumber != completedPartNumbers[len(completedPartNumbers)-1] && entry.Attributes.FileSize < multiPartMinSize {
+				glog.Warningf("completeMultipartUpload %s part file size less 5mb", entry.Name)
+				entityTooSmall = true
+			}
+		} else {
 			deleteEntries = append(deleteEntries, entry)
 		}
 	}
+	if entityTooSmall {
+		stats.S3HandlerCounter.WithLabelValues(stats.ErrorCompleteEntityTooSmall).Inc()
+		return nil, s3err.ErrEntityTooSmall
+	}

 	mime := pentry.Attributes.Mime
 	var finalParts []*filer_pb.FileChunk
 	var offset int64

-	sort.Ints(completedPartNumbers)
 	for _, partNumber := range completedPartNumbers {
 		partEntriesByNumber, ok := partEntries[partNumber]
 		if !ok {
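
The two new guards above boil down to one small validation rule: an empty part list maps to NoSuchUpload, and any part other than the highest-numbered one that is smaller than 5 MiB maps to EntityTooSmall. A distilled sketch of that logic outside the SeaweedFS types; the function and parameter names here are hypothetical, not from the codebase:

```go
package main

import (
	"fmt"
	"sort"
)

const multiPartMinSize = 5 * 1024 * 1024 // 5 MiB, matching the new constant

// validateParts mirrors the checks added in completeMultipartUpload:
// no parts at all is NoSuchUpload; a non-final part under 5 MiB is
// EntityTooSmall. The final (highest-numbered) part may be any size.
func validateParts(partNumbers []int, sizes map[int]int64) error {
	if len(partNumbers) == 0 {
		return fmt.Errorf("NoSuchUpload")
	}
	sort.Ints(partNumbers)
	last := partNumbers[len(partNumbers)-1]
	for _, n := range partNumbers {
		if len(partNumbers) > 1 && n != last && sizes[n] < multiPartMinSize {
			return fmt.Errorf("EntityTooSmall: part %d is %d bytes", n, sizes[n])
		}
	}
	return nil
}

func main() {
	// Part 1 is 1 MiB and not the final part, so validation fails.
	fmt.Println(validateParts([]int{1, 2}, map[int]int64{1: 1 << 20, 2: 1 << 20}))
	// A single small part is fine: only non-final parts have a minimum.
	fmt.Println(validateParts([]int{1}, map[int]int64{1: 1 << 20}))
}
```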


@@ -46,8 +46,9 @@ const (
 	// s3 handler
 	ErrorCompletedNoSuchUpload  = "errorCompletedNoSuchUpload"
-	ErrorCompletedPartEmpty     = "ErrorCompletedPartEmpty"
-	ErrorCompletedPartNumber    = "ErrorCompletedPartNumber"
+	ErrorCompleteEntityTooSmall = "errorCompleteEntityTooSmall"
+	ErrorCompletedPartEmpty     = "errorCompletedPartEmpty"
+	ErrorCompletedPartNumber    = "errorCompletedPartNumber"
 	ErrorCompletedPartNotFound  = "errorCompletedPartNotFound"
 	ErrorCompletedEtagInvalid   = "errorCompletedEtagInvalid"
 	ErrorCompletedEtagMismatch  = "errorCompletedEtagMismatch"
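
These constants are label values for the shared handler counter, and the call shape in the diff above (WithLabelValues(...).Inc()) is standard prometheus/client_golang usage. A minimal sketch of the same pattern, assuming a CounterVec stands in for stats.S3HandlerCounter; the namespace and metric name here are illustrative, not SeaweedFS's actual ones:

```go
package main

import (
	"github.com/prometheus/client_golang/prometheus"
)

// A CounterVec keyed by error type, analogous to stats.S3HandlerCounter.
// Namespace and Name are placeholders, not the real SeaweedFS metric.
var s3HandlerCounter = prometheus.NewCounterVec(
	prometheus.CounterOpts{
		Namespace: "example",
		Name:      "s3_handler_errors_total",
		Help:      "Counts S3 handler outcomes by error type.",
	},
	[]string{"type"},
)

const errorCompleteEntityTooSmall = "errorCompleteEntityTooSmall"

func main() {
	prometheus.MustRegister(s3HandlerCounter)
	// Same call shape as in completeMultipartUpload above.
	s3HandlerCounter.WithLabelValues(errorCompleteEntityTooSmall).Inc()
}
```

Keeping the label strings as named constants, as the diff does, avoids typo-induced metric fragmentation, which is exactly what the "refactor metric names" part of this commit cleans up.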