
reuse buffer by sync pool

Chris Lu 2018-12-28 03:27:48 -08:00
parent 9123d799b8
commit 308ac1d0d2
3 changed files with 24 additions and 25 deletions
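
The change below drops the package-level bufPool of *bytes.Buffer in dirty_page.go and instead hangs a sync.Pool of []byte slices off the WFS struct, sized to option.ChunkSizeLimit, so each dirty-page buffer is borrowed on first write and returned when the handle is released rather than allocated per file. A minimal, self-contained sketch of the pattern (the chunkSizeLimit constant stands in for the real per-mount option):

package main

import (
    "fmt"
    "sync"
)

// Illustrative stand-in for option.ChunkSizeLimit, which is configured per mount.
const chunkSizeLimit = 4 * 1024 * 1024

// Get returns a previously released buffer when one is available and
// falls back to New otherwise, so steady-state writes stop allocating.
var bufPool = sync.Pool{
    New: func() interface{} {
        return make([]byte, chunkSizeLimit)
    },
}

func main() {
    buf := bufPool.Get().([]byte) // borrow a buffer
    n := copy(buf, "dirty page data")
    fmt.Println(n, len(buf))
    bufPool.Put(buf) // return it for the next writer
}

One caveat worth knowing: a slice header does not fit in an interface word, so Put(buf) boxes the slice and itself allocates a little (staticcheck flags this as SA6002); the commit accepts that small cost for the simplicity of pooling []byte directly.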

weed/filesys/dirty_page.go

@@ -21,19 +21,23 @@ type ContinuousDirtyPages struct {
     lock sync.Mutex
 }
-var bufPool = sync.Pool{
-    New: func() interface{} {
-        return new(bytes.Buffer)
-    },
-}
 func newDirtyPages(file *File) *ContinuousDirtyPages {
     return &ContinuousDirtyPages{
-        Data: make([]byte, file.wfs.option.ChunkSizeLimit),
+        Data: nil,
         f:    file,
     }
 }
-func (pages *ContinuousDirtyPages) InitializeToFile(file *File) *ContinuousDirtyPages {
-    if len(pages.Data) != int(file.wfs.option.ChunkSizeLimit) {
-        pages.Data = make([]byte, file.wfs.option.ChunkSizeLimit)
+func (pages *ContinuousDirtyPages) releaseResource() {
+    if pages.Data != nil {
+        pages.f.wfs.bufPool.Put(pages.Data)
     }
-    pages.f = file
-    return pages
 }
 func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, data []byte) (chunks []*filer_pb.FileChunk, err error) {
@@ -43,6 +47,10 @@ func (pages *ContinuousDirtyPages) AddPage(ctx context.Context, offset int64, da
     var chunk *filer_pb.FileChunk
+    if pages.Data == nil {
+        pages.Data = pages.f.wfs.bufPool.Get().([]byte)
+    }
     if len(data) > len(pages.Data) {
         // this is more than what buffer can hold.
         return pages.flushAndSave(ctx, offset, data)
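
Taken together, the two hunks above set up a lazy borrow-and-return lifecycle: the buffer comes out of the pool on the first AddPage call rather than at construction time, writes larger than the buffer bypass it through flushAndSave, and releaseResource hands the buffer back. A sketch of that lifecycle with illustrative names; the nil reset after Put is an extra safeguard added here, not something this commit does:

package main

import "sync"

type dirtyPages struct {
    data []byte
    pool *sync.Pool
}

func (p *dirtyPages) addPage(data []byte) {
    if p.data == nil {
        p.data = p.pool.Get().([]byte) // first write: borrow from the pool
    }
    if len(data) > len(p.data) {
        p.flushAndSave(data) // more than the buffer can hold: write through
        return
    }
    copy(p.data, data)
}

func (p *dirtyPages) flushAndSave(data []byte) {
    // stand-in for the real path that uploads oversized writes directly
}

func (p *dirtyPages) releaseResource() {
    if p.data != nil {
        p.pool.Put(p.data) // hand the buffer back for reuse
        p.data = nil       // extra safeguard against a double Put (not in the commit)
    }
}

func main() {
    pool := &sync.Pool{New: func() interface{} { return make([]byte, 64) }}
    p := &dirtyPages{pool: pool}
    p.addPage([]byte("hello"))
    p.releaseResource()
}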

weed/filesys/filehandle.go

@@ -38,16 +38,6 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
     }
 }
-func (fh *FileHandle) InitializeToFile(file *File, uid, gid uint32) *FileHandle {
-    newHandle := &FileHandle{
-        f:          file,
-        dirtyPages: fh.dirtyPages.InitializeToFile(file),
-        Uid:        uid,
-        Gid:        gid,
-    }
-    return newHandle
-}
 var _ = fs.Handle(&FileHandle{})
 // var _ = fs.HandleReadAller(&FileHandle{})
@@ -175,6 +165,8 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
     glog.V(4).Infof("%v release fh %d", fh.f.fullpath(), fh.handle)
+    fh.dirtyPages.releaseResource()
     fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
     fh.f.isOpen = false

weed/filesys/wfs.go

@@ -39,6 +39,7 @@ type WFS struct {
     handles           []*FileHandle
     pathToHandleIndex map[string]int
     pathToHandleLock  sync.Mutex
+    bufPool           sync.Pool
     stats statsCache
 }
@@ -52,6 +53,11 @@ func NewSeaweedFileSystem(option *Option) *WFS {
         option:                    option,
         listDirectoryEntriesCache: ccache.New(ccache.Configure().MaxSize(int64(option.DirListingLimit) + 200).ItemsToPrune(100)),
         pathToHandleIndex:         make(map[string]int),
+        bufPool: sync.Pool{
+            New: func() interface{} {
+                return make([]byte, option.ChunkSizeLimit)
+            },
+        },
     }
 }
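
Moving the pool from a package variable into the WFS struct means the New closure can capture the per-mount option.ChunkSizeLimit, so every buffer the pool creates matches that instance's chunk size instead of a hard-coded constant. A compact sketch of the same closure shape (types abbreviated to what the example needs):

package main

import "sync"

// Abbreviated stand-ins for the real Option and WFS types.
type Option struct{ ChunkSizeLimit int64 }

type WFS struct{ bufPool sync.Pool }

func NewWFS(option *Option) *WFS {
    return &WFS{
        bufPool: sync.Pool{
            New: func() interface{} {
                // sized from this mount's option, captured by the closure
                return make([]byte, option.ChunkSizeLimit)
            },
        },
    }
}

func main() {
    wfs := NewWFS(&Option{ChunkSizeLimit: 1 << 20})
    buf := wfs.bufPool.Get().([]byte)
    wfs.bufPool.Put(buf)
}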
@@ -76,17 +82,10 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
     index, found := wfs.pathToHandleIndex[fullpath]
     if found && wfs.handles[index] != nil {
-        glog.V(4).Infoln(fullpath, "found fileHandle id", index)
+        glog.V(2).Infoln(fullpath, "found fileHandle id", index)
         return wfs.handles[index]
     }
-    if found && wfs.handles[index] != nil {
-        glog.V(4).Infoln(fullpath, "reuse previous fileHandle id", index)
-        wfs.handles[index].InitializeToFile(file, uid, gid)
-        fileHandle.handle = uint64(index)
-        return
-    }
     fileHandle = newFileHandle(file, uid, gid)
     for i, h := range wfs.handles {
         if h == nil {
@@ -100,7 +99,7 @@ func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHand
     wfs.handles = append(wfs.handles, fileHandle)
     fileHandle.handle = uint64(len(wfs.handles) - 1)
-    glog.V(4).Infoln(fullpath, "new fileHandle id", fileHandle.handle)
+    glog.V(2).Infoln(fullpath, "new fileHandle id", fileHandle.handle)
     wfs.pathToHandleIndex[fullpath] = int(fileHandle.handle)
     return