diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 854b35f82..9bf2df6ef 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -164,6 +164,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 	}
 
 	var entry *filer.Entry
+	var newChunks []*filer_pb.FileChunk
 	var mergedChunks []*filer_pb.FileChunk
 
 	isAppend := isAppend(r)
@@ -186,7 +187,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 			}
 			entry.FileSize += uint64(chunkOffset)
 		}
-		mergedChunks = append(entry.Chunks, fileChunks...)
+		newChunks = append(entry.Chunks, fileChunks...)
 
 		// TODO
 		if len(entry.Content) > 0 {
@@ -196,7 +197,7 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 
 	} else {
 		glog.V(4).Infoln("saving", path)
-		mergedChunks = fileChunks
+		newChunks = fileChunks
 		entry = &filer.Entry{
 			FullPath: util.FullPath(path),
 			Attr: filer.Attr{
@@ -217,6 +218,13 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		}
 	}
 
+	// maybe concatenate small chunks into one whole chunk
+	mergedChunks, replyerr = fs.maybeMergeChunks(so, newChunks)
+	if replyerr != nil {
+		glog.V(0).Infof("merge chunks %s: %v", r.RequestURI, replyerr)
+		mergedChunks = newChunks
+	}
+
 	// maybe compact entry chunks
 	mergedChunks, replyerr = filer.MaybeManifestize(fs.saveAsChunk(so), mergedChunks)
 	if replyerr != nil {
diff --git a/weed/server/filer_server_handlers_write_merge.go b/weed/server/filer_server_handlers_write_merge.go
new file mode 100644
index 000000000..dadc6f726
--- /dev/null
+++ b/weed/server/filer_server_handlers_write_merge.go
@@ -0,0 +1,11 @@
+package weed_server
+
+import (
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+)
+
+func (fs *FilerServer) maybeMergeChunks(so *operation.StorageOption, inputChunks []*filer_pb.FileChunk) (mergedChunks []*filer_pb.FileChunk, err error) {
+	//TODO merge consecutive smaller chunks into a large chunk to reduce number of chunks
+	return inputChunks, nil
+}
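
For reviewers: maybeMergeChunks is deliberately a no-op in this patch, and the TODO points at the eventual strategy of folding consecutive small chunks into one larger chunk. Below is a minimal sketch of the candidate-selection half of that work; planMergeRuns and the smallLimit threshold are illustrative names, not part of the patch, and the expensive step of reading each run back and re-uploading it as a single chunk is left out.

package weed_server

import (
	"sort"

	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)

// planMergeRuns is a hypothetical helper: it returns runs of two or more
// consecutive, byte-contiguous chunks that are each smaller than smallLimit.
// A future maybeMergeChunks could read each run back and re-upload it as one
// chunk, replacing the run in the entry's chunk list.
func planMergeRuns(chunks []*filer_pb.FileChunk, smallLimit uint64) (runs [][]*filer_pb.FileChunk) {
	sorted := make([]*filer_pb.FileChunk, len(chunks))
	copy(sorted, chunks)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Offset < sorted[j].Offset })

	var run []*filer_pb.FileChunk
	flush := func() {
		if len(run) >= 2 { // merging a single chunk gains nothing
			runs = append(runs, run)
		}
		run = nil
	}
	for _, c := range sorted {
		isSmall := c.Size < smallLimit
		contiguous := len(run) > 0 &&
			run[len(run)-1].Offset+int64(run[len(run)-1].Size) == c.Offset
		if isSmall && (len(run) == 0 || contiguous) {
			run = append(run, c) // extend the current mergeable run
		} else {
			flush() // run broken by a large or non-contiguous chunk
			if isSmall {
				run = append(run, c)
			}
		}
	}
	flush()
	return runs
}

The fallback already added in saveMetaData (mergedChunks = newChunks on error) means a failed or partial merge only costs the optimization, never correctness, which is why the hook can safely start as a pass-through.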