
[s3] fix s3 test_multipart_get_part (#5476)

* try fix s3 test_multipart_get_part

* add passed s3 tests

* fix SeaweedFSUploadId

* rm spaces

* convert part request to range

* add passed s3 tests of multipart
Konstantin Lebedev 2024-04-14 22:41:32 +05:00 committed by GitHub
parent 8833745fbc
commit 33537ae29f
7 changed files with 53 additions and 11 deletions
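Background for the diff below: S3's GetObject and HeadObject accept a partNumber query parameter that returns just one part of a completed multipart upload, together with an x-amz-mp-parts-count response header; this commit wires that through to the filer. A minimal client-side sketch of the behavior under test, using aws-sdk-go (endpoint, bucket, key, and part number are placeholders):

// Sketch: fetch part 2 of a completed multipart object, the case
// exercised by test_multipart_get_part. Bucket and key are
// placeholders; the SeaweedFS S3 gateway listens on :8000 in the
// CI setup below.
package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession(&aws.Config{
        Endpoint:         aws.String("http://127.0.0.1:8000"),
        Region:           aws.String("us-east-1"),
        S3ForcePathStyle: aws.Bool(true),
    }))
    svc := s3.New(sess)

    out, err := svc.GetObject(&s3.GetObjectInput{
        Bucket:     aws.String("bucket"),
        Key:        aws.String("key"),
        PartNumber: aws.Int64(2), // served as a byte range over chunk 2
    })
    if err != nil {
        log.Fatal(err)
    }
    defer out.Body.Close()
    // ETag is the MD5 of the single part; PartsCount is populated
    // from the new X-Amz-Mp-Parts-Count response header.
    fmt.Println(aws.StringValue(out.ETag), aws.Int64Value(out.PartsCount))
}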


@@ -43,7 +43,7 @@ jobs:
 cd /__w/seaweedfs/seaweedfs/weed
 go install -buildvcs=false
 set -x
-nohup weed -v 0 server -filer -s3 -ip.bind 0.0.0.0 \
+nohup weed -v 0 server -filer -filer.maxMB=64 -s3 -ip.bind 0.0.0.0 \
 -master.raftHashicorp -master.electionTimeout 1s -master.volumeSizeLimitMB=1024 \
 -volume.max=100 -volume.preStopSeconds=1 -s3.port=8000 -metricsPort=9324 \
 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=../docker/compose/s3.json &
@@ -164,12 +164,19 @@ jobs:
 s3tests_boto3/functional/test_s3.py::test_object_copy_key_not_found \
 s3tests_boto3/functional/test_s3.py::test_multipart_copy_small \
 s3tests_boto3/functional/test_s3.py::test_multipart_copy_without_range \
-s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \
 s3tests_boto3/functional/test_s3.py::test_multipart_copy_special_names \
+s3tests_boto3/functional/test_s3.py::test_multipart_copy_multiple_sizes \
+s3tests_boto3/functional/test_s3.py::test_multipart_get_part \
 s3tests_boto3/functional/test_s3.py::test_multipart_upload \
 s3tests_boto3/functional/test_s3.py::test_multipart_upload_empty \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_multiple_sizes \
 s3tests_boto3/functional/test_s3.py::test_multipart_upload_contents \
 s3tests_boto3/functional/test_s3.py::test_multipart_upload_overwrite_existing_object \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_size_too_small \
+s3tests_boto3/functional/test_s3.py::test_multipart_resend_first_finishes_last \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_resend_part \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_missing_part \
+s3tests_boto3/functional/test_s3.py::test_multipart_upload_incorrect_etag \
 s3tests_boto3/functional/test_s3.py::test_abort_multipart_upload \
 s3tests_boto3/functional/test_s3.py::test_list_multipart_upload \
 s3tests_boto3/functional/test_s3.py::test_atomic_read_1mb \


@@ -15,7 +15,7 @@ full_install:
 cd weed; go install -tags "elastic gocdk sqlite ydb tikv rclone"
 server: install
-weed -v 0 server -s3 -filer -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
+weed -v 0 server -s3 -filer -filer.maxMB=64 -volume.max=0 -master.volumeSizeLimitMB=1024 -volume.preStopSeconds=1 -s3.port=8000 -s3.allowEmptyFolder=false -s3.allowDeleteBucketNotEmpty=true -s3.config=./docker/compose/s3.json -metricsPort=9324
 benchmark: install warp_install
 pkill weed || true


@@ -103,7 +103,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 if len(entries) == 0 {
     entryName, dirName := s3a.getEntryNameAndDir(input)
     if entry, _ := s3a.getEntry(dirName, entryName); entry != nil && entry.Extended != nil {
-        if uploadId, ok := entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id]; ok && *input.UploadId == string(uploadId) {
+        if uploadId, ok := entry.Extended[s3_constants.SeaweedFSUploadId]; ok && *input.UploadId == string(uploadId) {
             return &CompleteMultipartUploadResult{
                 CompleteMultipartUploadOutput: s3.CompleteMultipartUploadOutput{
                     Location: aws.String(fmt.Sprintf("http://%s%s/%s", s3a.option.Filer.ToHttpAddress(), urlEscapeObject(dirName), urlPathEscape(entryName))),
@@ -222,7 +222,7 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
 if entry.Extended == nil {
     entry.Extended = make(map[string][]byte)
 }
-entry.Extended[s3_constants.X_SeaweedFS_Header_Upload_Id] = []byte(*input.UploadId)
+entry.Extended[s3_constants.SeaweedFSUploadId] = []byte(*input.UploadId)
 for k, v := range pentry.Extended {
     if k != "key" {
         entry.Extended[k] = v
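Context for the SeaweedFSUploadId lookup above: completeMultipartUpload records the upload id on the final object entry, so a retried CompleteMultipartUpload whose parts directory has already been cleaned up can still be answered with success instead of an error. A simplified, self-contained sketch of that idempotency check (helper and names are illustrative, not the actual code):

package main

import "fmt"

// alreadyCompleted reports whether a previous CompleteMultipartUpload
// for the same upload id already produced this entry.
func alreadyCompleted(extended map[string][]byte, uploadId string) bool {
    recorded, ok := extended["X-Seaweedfs-Upload-Id"]
    return ok && string(recorded) == uploadId
}

func main() {
    entry := map[string][]byte{"X-Seaweedfs-Upload-Id": []byte("abc123")}
    fmt.Println(alreadyCompleted(entry, "abc123")) // true: safe to return success again
    fmt.Println(alreadyCompleted(entry, "zzz999")) // false: genuinely no such upload
}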


@@ -38,8 +38,9 @@ const (
 AmzObjectTaggingDirective = "X-Amz-Tagging-Directive"
 AmzTagCount = "x-amz-tagging-count"
-X_SeaweedFS_Header_Directory_Key = "x-seaweedfs-is-directory-key"
-X_SeaweedFS_Header_Upload_Id = "X-Seaweedfs-Upload-Id"
+SeaweedFSIsDirectoryKey = "X-Seaweedfs-Is-Directory-Key"
+SeaweedFSPartNumber = "X-Seaweedfs-Part-Number"
+SeaweedFSUploadId = "X-Seaweedfs-Upload-Id"

 // S3 ACL headers
 AmzCannedAcl = "X-Amz-Acl"
@@ -48,6 +49,8 @@
 AmzAclWrite = "X-Amz-Grant-Write"
 AmzAclReadAcp = "X-Amz-Grant-Read-Acp"
 AmzAclWriteAcp = "X-Amz-Grant-Write-Acp"
+
+AmzMpPartsCount = "X-Amz-Mp-Parts-Count"
 )

 // Non-Standard S3 HTTP request constants


@@ -370,6 +370,9 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 if _, ok := s3_constants.PassThroughHeaders[strings.ToLower(k)]; ok {
     proxyReq.Header[k] = v
 }
+if k == "partNumber" {
+    proxyReq.Header[s3_constants.SeaweedFSPartNumber] = v
+}
 }
 for header, values := range r.Header {
     proxyReq.Header[header] = values
@@ -411,7 +414,7 @@
 }
 TimeToFirstByte(r.Method, start, r)

-if resp.Header.Get(s3_constants.X_SeaweedFS_Header_Directory_Key) == "true" {
+if resp.Header.Get(s3_constants.SeaweedFSIsDirectoryKey) == "true" {
     responseStatusCode := responseFn(resp, w)
     s3err.PostLog(r, responseStatusCode, s3err.ErrNone)
     return
@@ -429,6 +432,18 @@
     return
 }

+if resp.StatusCode == http.StatusBadRequest {
+    resp_body, _ := io.ReadAll(resp.Body)
+    switch string(resp_body) {
+    case "InvalidPart":
+        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidPart)
+    default:
+        s3err.WriteErrorResponse(w, r, s3err.ErrInvalidRequest)
+    }
+    resp.Body.Close()
+    return
+}
+
 setUserMetadataKeyToLowercase(resp)

 responseStatusCode := responseFn(resp, w)
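The two ends of the new plumbing meet in this file: the gateway copies the partNumber query value onto the internal X-Seaweedfs-Part-Number header on the way in, and on the way out a filer 400 response whose body is the bare string "InvalidPart" is translated into the standard S3 InvalidPart error. A self-contained sketch of the query-to-header hand-off (hypothetical helper, not the actual server wiring):

// Minimal illustration of the hand-off: ?partNumber=N on the S3
// request becomes X-Seaweedfs-Part-Number on the proxied filer
// request, so the filer can slice the object by chunk.
package main

import (
    "fmt"
    "net/http"
    "net/url"
)

func forwardPartNumber(query url.Values, proxyHeader http.Header) {
    if v := query.Get("partNumber"); v != "" {
        proxyHeader.Set("X-Seaweedfs-Part-Number", v)
    }
}

func main() {
    q := url.Values{"partNumber": []string{"2"}}
    h := http.Header{}
    forwardPartNumber(q, h)
    fmt.Println(h.Get("X-Seaweedfs-Part-Number")) // "2"
}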


@@ -3,6 +3,8 @@ package weed_server
 import (
     "bytes"
     "context"
+    "encoding/base64"
+    "encoding/hex"
     "errors"
     "fmt"
     "io"
@@ -132,7 +134,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
     return
 }
 // inform S3 API this is a user created directory key object
-w.Header().Set(s3_constants.X_SeaweedFS_Header_Directory_Key, "true")
+w.Header().Set(s3_constants.SeaweedFSIsDirectoryKey, "true")
 }

 if isForDirectory && entry.Attr.Mime != s3_constants.FolderMimeType {
@@ -158,7 +160,22 @@
     return
 }

-etag := filer.ETagEntry(entry)
+var etag string
+if partNumber, errNum := strconv.Atoi(r.Header.Get(s3_constants.SeaweedFSPartNumber)); errNum == nil {
+    if len(entry.Chunks) < partNumber {
+        stats.FilerHandlerCounter.WithLabelValues(stats.ErrorReadChunk).Inc()
+        w.WriteHeader(http.StatusBadRequest)
+        w.Write([]byte("InvalidPart"))
+        return
+    }
+    w.Header().Set(s3_constants.AmzMpPartsCount, strconv.Itoa(len(entry.Chunks)))
+    partChunk := entry.GetChunks()[partNumber-1]
+    md5, _ := base64.StdEncoding.DecodeString(partChunk.ETag)
+    etag = hex.EncodeToString(md5)
+    r.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", partChunk.Offset, uint64(partChunk.Offset)+partChunk.Size-1))
+} else {
+    etag = filer.ETagEntry(entry)
+}
 w.Header().Set("Accept-Ranges", "bytes")

 // mime type
@@ -207,7 +224,6 @@
 filename := entry.Name()
 adjustPassthroughHeaders(w, r, filename)
 totalSize := int64(entry.Size())
-
 if r.Method == "HEAD" {
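The handler above serves a part by translating its number into a byte range over the entry's chunks, which assumes each uploaded part landed as exactly one chunk; that is presumably why the CI job and Makefile earlier in this diff now pass -filer.maxMB=64, so parts up to 64MB are not split on write. Chunk ETags are stored base64-encoded while S3 part ETags are hex, hence the re-encoding. A runnable sketch of the mapping (chunk sizes and ETag values are made-up examples):

// Worked example of the part -> range mapping: part N is chunk N-1,
// the Range header spans [Offset, Offset+Size-1], and the stored
// base64 MD5 is re-encoded as the hex ETag S3 clients expect.
package main

import (
    "encoding/base64"
    "encoding/hex"
    "fmt"
)

type chunk struct {
    Offset int64
    Size   uint64
    ETag   string // base64(md5) as stored by the filer
}

func main() {
    chunks := []chunk{
        {Offset: 0, Size: 5 << 20, ETag: "XrY7u+Ae7tCTyyK7j1rNww=="},
        {Offset: 5 << 20, Size: 3 << 20, ETag: "XUFAKrxLKna5cZ2REBfFkg=="},
    }
    partNumber := 2
    c := chunks[partNumber-1]

    md5sum, _ := base64.StdEncoding.DecodeString(c.ETag)
    fmt.Printf("ETag: %q\n", hex.EncodeToString(md5sum))
    // Range end is offset+size-1, matching the handler above.
    fmt.Printf("Range: bytes=%d-%d\n", c.Offset, uint64(c.Offset)+c.Size-1)
}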


@@ -41,6 +41,7 @@ const (
 ErrorWriteEntry = "write.entry.failed"
 RepeatErrorUploadContent = "upload.content.repeat.failed"
 ErrorChunkAssign = "chunkAssign.failed"
+ErrorReadChunk = "read.chunk.failed"
 ErrorReadCache = "read.cache.failed"
 ErrorReadStream = "read.stream.failed"