1
0
Fork 0
mirror of https://github.com/chrislusf/seaweedfs synced 2024-07-03 23:56:41 +02:00

Revert "Changing needle_byte_cache so that it doesn't grow so big when larger files are added."

This reverts commit 87fee21ef5.
This commit is contained in:
Mike Tolman 2016-08-05 15:46:45 -06:00
parent 0f4c7dd8fd
commit 0d331c1e3a

View file

@@ -8,7 +8,6 @@ import (
"github.com/hashicorp/golang-lru"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/glog"
)
var (
@@ -25,7 +24,7 @@ In caching, the string~[]byte mapping is cached
*/
func init() {
bytesPool = util.NewBytesPool()
bytesCache, _ = lru.NewWithEvict(50, func(key interface{}, value interface{}) {
bytesCache, _ = lru.NewWithEvict(512, func(key interface{}, value interface{}) {
value.(*Block).decreaseReference()
})
}
@@ -47,37 +46,22 @@ func (block *Block) increaseReference() {
// get bytes from the LRU cache of []byte first, then from the bytes pool
// when []byte in LRU cache is evicted, it will be put back to the bytes pool
func getBytesForFileBlock(r *os.File, offset int64, readSize int) (dataSlice []byte, block *Block, err error) {
//Skip the cache if we are looking for a block that is too big to fit in the cache (defaulting to 10MB)
cacheable := readSize <= (1024*1024*10)
if !cacheable {
glog.V(4).Infoln("Block too big to keep in cache. Size:", readSize)
}
cacheKey := string("")
if cacheable {
// check cache, return if found
cacheKey = fmt.Sprintf("%d:%d:%d", r.Fd(), offset >> 3, readSize)
cacheKey := fmt.Sprintf("%d:%d:%d", r.Fd(), offset>>3, readSize)
if obj, found := bytesCache.Get(cacheKey); found {
glog.V(4).Infoln("Found block in cache. Size:", readSize)
block = obj.(*Block)
block.increaseReference()
dataSlice = block.Bytes[0:readSize]
return dataSlice, block, nil
}
}
// get the []byte from pool
b := bytesPool.Get(readSize)
// refCount = 2, one by the bytesCache, one by the actual needle object
refCount := int32(1)
if cacheable {
refCount = 2
}
block = &Block{Bytes: b, refCount: refCount}
block = &Block{Bytes: b, refCount: 2}
dataSlice = block.Bytes[0:readSize]
_, err = r.ReadAt(dataSlice, offset)
if cacheable {
bytesCache.Add(cacheKey, block)
}
return dataSlice, block, err
}