
return part of the chunk if chunkview is not the full chunk

Chris Lu 2020-03-29 00:54:39 -07:00
parent ae2309dc58
commit 057722bbf4
2 changed files with 32 additions and 16 deletions
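
The change in outline: the chunk reader still caches whole chunks, but when a chunk view covers only part of a chunk it now returns just that sub-range instead of the entire cached chunk. The following is a minimal, self-contained sketch of that idea; ChunkView, cache, and fetchFullChunk are simplified stand-ins for filer2.ChunkView, the chunk cache, and the volume-server fetch seen in the diff below, not the real SeaweedFS types.

package main

import "fmt"

// ChunkView is a simplified stand-in for filer2.ChunkView: it describes the
// sub-range [Offset, Offset+Size) of a stored chunk identified by FileId.
type ChunkView struct {
    FileId string
    Offset int64
    Size   uint64
}

// cache stands in for the chunk cache: full chunks keyed by file id.
var cache = map[string][]byte{}

// fetchFullChunk stands in for fetching the whole chunk from a volume server.
func fetchFullChunk(fileId string) ([]byte, error) {
    return []byte("0123456789abcdef"), nil
}

// fetchChunkData mirrors the new logic: obtain (and cache) the full chunk,
// then return only the part described by the chunk view.
func fetchChunkData(view ChunkView) ([]byte, error) {
    chunkData, ok := cache[view.FileId]
    if !ok {
        var err error
        chunkData, err = fetchFullChunk(view.FileId)
        if err != nil {
            return nil, err
        }
        cache[view.FileId] = chunkData
    }
    end := view.Offset + int64(view.Size)
    if int64(len(chunkData)) < end {
        return nil, fmt.Errorf("chunk view [%d,%d) larger than chunk of %d bytes", view.Offset, end, len(chunkData))
    }
    return chunkData[view.Offset:end], nil
}

func main() {
    part, err := fetchChunkData(ChunkView{FileId: "3,0123", Offset: 4, Size: 6})
    fmt.Println(string(part), err) // prints "456789 <nil>"
}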


@@ -102,34 +102,46 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 }

-func (c *ChunkReadAt) fetchChunkData(chunkView *filer2.ChunkView) ([]byte, error) {
+func (c *ChunkReadAt) fetchChunkData(chunkView *filer2.ChunkView) (data []byte, err error) {

     // fmt.Printf("fetching %s [%d,%d)\n", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))

     chunkData := c.chunkCache.GetChunk(chunkView.FileId)
     if chunkData != nil {
         glog.V(3).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
-        return chunkData, nil
+    } else {
+        chunkData, err = c.doFetchFullChunkData(chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
+        if err != nil {
+            return nil, err
+        }
     }

-    urlString, err := c.lookupFileId(chunkView.FileId)
+    if int64(len(chunkData)) < chunkView.Offset+int64(chunkView.Size) {
+        return nil, fmt.Errorf("unexpected larger chunkView [%d,%d) than chunk %d", chunkView.Offset, chunkView.Offset+int64(chunkView.Size), len(chunkData))
+    }
+
+    data = chunkData[chunkView.Offset : chunkView.Offset+int64(chunkView.Size)]
+
+    c.chunkCache.SetChunk(chunkView.FileId, chunkData)
+
+    return data, nil
+}
+
+func (c *ChunkReadAt) doFetchFullChunkData(fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
+
+    urlString, err := c.lookupFileId(fileId)
     if err != nil {
-        glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+        glog.V(1).Infof("operation LookupFileId %s failed, err: %v", fileId, err)
         return nil, err
     }
     var buffer bytes.Buffer
-    err = util.ReadUrlAsStream(urlString, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk, chunkView.Offset, int(chunkView.Size), func(data []byte) {
+    err = util.ReadUrlAsStream(urlString, cipherKey, isGzipped, true, 0, 0, func(data []byte) {
         buffer.Write(data)
     })
     if err != nil {
-        glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
+        glog.V(1).Infof("read %s failed, err: %v", fileId, err)
         return nil, err
     }

-    glog.V(3).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
-
-    chunkData = buffer.Bytes()
-    c.chunkCache.SetChunk(chunkView.FileId, chunkData)
-
-    return chunkData, nil
+    return buffer.Bytes(), nil
 }


@@ -193,7 +193,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isGzipped bool, isFullChunk bool,
     if cipherKey != nil {
         var n int
-        err := readEncryptedUrl(fileUrl, cipherKey, isGzipped, offset, size, func(data []byte) {
+        err := readEncryptedUrl(fileUrl, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
             n = copy(buf, data)
         })
         return int64(n), err
@@ -261,7 +261,7 @@ func ReadUrl(fileUrl string, cipherKey []byte, isGzipped bool, isFullChunk bool,
 func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
     if cipherKey != nil {
-        return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, offset, size, fn)
+        return readEncryptedUrl(fileUrl, cipherKey, isContentGzipped, isFullChunk, offset, size, fn)
     }

     req, err := http.NewRequest("GET", fileUrl, nil)
@@ -300,7 +300,7 @@ func ReadUrlAsStream(fileUrl string, cipherKey []byte, isContentGzipped bool, is
 }

-func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentGzipped bool, offset int64, size int, fn func(data []byte)) error {
+func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentGzipped bool, isFullChunk bool, offset int64, size int, fn func(data []byte)) error {
     encryptedData, err := Get(fileUrl)
     if err != nil {
         return fmt.Errorf("fetch %s: %v", fileUrl, err)
@@ -318,7 +318,11 @@ func readEncryptedUrl(fileUrl string, cipherKey []byte, isContentGzipped bool, o
     if len(decryptedData) < int(offset)+size {
         return fmt.Errorf("read decrypted %s size %d [%d, %d)", fileUrl, len(decryptedData), offset, int(offset)+size)
     }
-    fn(decryptedData[int(offset) : int(offset)+size])
+    if isFullChunk {
+        fn(decryptedData)
+    } else {
+        fn(decryptedData[int(offset) : int(offset)+size])
+    }
     return nil
 }
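
With the widened signatures above, a full-chunk read passes isFullChunk=true and leaves offset and size at zero (as the new doFetchFullChunkData does), while a ranged read keeps isFullChunk=false with an explicit window. A hedged usage sketch, assuming the module path of this era (github.com/chrislusf/seaweedfs) and a made-up volume-server URL:

package main

import (
    "bytes"
    "fmt"

    "github.com/chrislusf/seaweedfs/weed/util"
)

func main() {
    fileUrl := "http://127.0.0.1:8080/3,0123456789" // hypothetical volume-server URL
    var buf bytes.Buffer

    // Full-chunk read: isFullChunk=true, offset/size left at zero,
    // matching how doFetchFullChunkData calls ReadUrlAsStream.
    err := util.ReadUrlAsStream(fileUrl, nil, false, true, 0, 0, func(data []byte) {
        buf.Write(data)
    })
    fmt.Println("full chunk:", buf.Len(), err)

    // Ranged read: isFullChunk=false with an explicit [offset, offset+size) window.
    buf.Reset()
    err = util.ReadUrlAsStream(fileUrl, nil, false, false, 1024, 4096, func(data []byte) {
        buf.Write(data)
    })
    fmt.Println("ranged read:", buf.Len(), err)
}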