
refactor Offset into a struct of bytes

Chris Lu 2019-04-08 19:40:56 -07:00
parent a41ba79119
commit 000ee725fc
15 changed files with 111 additions and 102 deletions
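
For orientation before the per-file hunks: the commit replaces the plain uint32 Offset in weed/storage/types with a four-byte struct, and routes padded-offset conversions through ToOffset, ToAcutalOffset, and IsZero. Below is a minimal, standalone sketch of that before/after shape, using local stand-in names; it is illustrative only, not the library code.

package main

import "fmt"

// Local stand-ins for weed/storage/types.Offset; illustrative only.

// Before this commit: a padded offset was a bare integer, so call sites could
// write arithmetic like int64(offset)*NeedlePaddingSize or checks like offset > 0 anywhere.
type offsetBefore uint32

// After this commit: the same four bytes as named fields. Those expressions no longer
// compile, which funnels call sites through ToOffset / ToAcutalOffset / IsZero,
// as the hunks below show.
type offsetAfter struct {
	b3, b2, b1, b0 byte // b0 is the least significant byte
}

func main() {
	fmt.Println(offsetBefore(512) > 0)          // ordering works on the old integer type
	fmt.Println(offsetAfter{} == offsetAfter{}) // only equality (plus an IsZero-style check) remains
}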

View file

@@ -107,7 +107,7 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *storage.Needle, offset i
nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d gzip %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped(), ok, nv)
if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && int64(nv.Offset)*types.NeedlePaddingSize == offset {
if ok && nv.Size > 0 && nv.Size != types.TombstoneFileSize && nv.Offset.ToAcutalOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)

View file

@@ -45,11 +45,11 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *storage.Needle, offset int64) error {
glog.V(2).Infof("key %d offset %d size %d disk_size %d gzip %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsGzipped())
if n.Size > 0 && n.Size != types.TombstoneFileSize {
pe := scanner.nm.Put(n.Id, types.Offset(offset/types.NeedlePaddingSize), n.Size)
pe := scanner.nm.Put(n.Id, types.ToOffset(offset), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe)
} else {
glog.V(2).Infof("skipping deleted file ...")
return scanner.nm.Delete(n.Id, types.Offset(offset/types.NeedlePaddingSize))
return scanner.nm.Delete(n.Id, types.ToOffset(offset))
}
return nil
}

View file

@@ -3,7 +3,6 @@ package weed_server
import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/storage/types"
"io"
"os"
@@ -28,7 +27,7 @@ func (vs *VolumeServer) VolumeFollow(req *volume_server_pb.VolumeFollowRequest,
return nil
}
startOffset := int64(foundOffset) * int64(types.NeedlePaddingSize)
startOffset := foundOffset.ToAcutalOffset()
buf := make([]byte, 1024*1024*2)
return sendFileContent(v.DataFile(), buf, startOffset, stopOffset, stream)

View file

@@ -26,7 +26,7 @@ func (cm *BtreeMap) Set(key NeedleId, offset Offset, size uint32) (oldOffset Off
}
func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) {
found := cm.tree.Delete(NeedleValue{key, 0, 0})
found := cm.tree.Delete(NeedleValue{key, Offset{}, 0})
if found != nil {
old := found.(NeedleValue)
return old.Size
@@ -34,7 +34,7 @@ func (cm *BtreeMap) Delete(key NeedleId) (oldSize uint32) {
return
}
func (cm *BtreeMap) Get(key NeedleId) (*NeedleValue, bool) {
found := cm.tree.Get(NeedleValue{key, 0, 0})
found := cm.tree.Get(NeedleValue{key, Offset{}, 0})
if found != nil {
old := found.(NeedleValue)
return &old, true

View file

@@ -62,7 +62,7 @@ func loadNewNeedleMap(file *os.File) (*CompactMap, uint64) {
offset := BytesToOffset(bytes[i+NeedleIdSize : i+NeedleIdSize+OffsetSize])
size := util.BytesToUint32(bytes[i+NeedleIdSize+OffsetSize : i+NeedleIdSize+OffsetSize+SizeSize])
if offset > 0 {
if !offset.IsZero() {
m.Set(NeedleId(key), offset, size)
} else {
m.Delete(key)

View file

@@ -1,22 +1,23 @@
package needle
import (
"fmt"
. "github.com/chrislusf/seaweedfs/weed/storage/types"
"testing"
)
func TestOverflow2(t *testing.T) {
m := NewCompactMap()
m.Set(NeedleId(150088), 8, 3000073)
m.Set(NeedleId(150073), 8, 3000073)
m.Set(NeedleId(150089), 8, 3000073)
m.Set(NeedleId(150076), 8, 3000073)
m.Set(NeedleId(150124), 8, 3000073)
m.Set(NeedleId(150137), 8, 3000073)
m.Set(NeedleId(150147), 8, 3000073)
m.Set(NeedleId(150145), 8, 3000073)
m.Set(NeedleId(150158), 8, 3000073)
m.Set(NeedleId(150162), 8, 3000073)
m.Set(NeedleId(150088), ToOffset(8), 3000073)
m.Set(NeedleId(150073), ToOffset(8), 3000073)
m.Set(NeedleId(150089), ToOffset(8), 3000073)
m.Set(NeedleId(150076), ToOffset(8), 3000073)
m.Set(NeedleId(150124), ToOffset(8), 3000073)
m.Set(NeedleId(150137), ToOffset(8), 3000073)
m.Set(NeedleId(150147), ToOffset(8), 3000073)
m.Set(NeedleId(150145), ToOffset(8), 3000073)
m.Set(NeedleId(150158), ToOffset(8), 3000073)
m.Set(NeedleId(150162), ToOffset(8), 3000073)
m.Visit(func(value NeedleValue) error {
println("needle key:", value.Key)
@@ -26,13 +27,13 @@ func TestOverflow2(t *testing.T) {
func TestIssue52(t *testing.T) {
m := NewCompactMap()
m.Set(NeedleId(10002), 10002, 10002)
m.Set(NeedleId(10002), ToOffset(10002), 10002)
if element, ok := m.Get(NeedleId(10002)); ok {
println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size)
fmt.Printf("key %d ok %v %d, %v, %d\n", 10002, ok, element.Key, element.Offset, element.Size)
}
m.Set(NeedleId(10001), 10001, 10001)
m.Set(NeedleId(10001), ToOffset(10001), 10001)
if element, ok := m.Get(NeedleId(10002)); ok {
println("key", 10002, "ok", ok, element.Key, element.Offset, element.Size)
fmt.Printf("key %d ok %v %d, %v, %d\n", 10002, ok, element.Key, element.Offset, element.Size)
} else {
t.Fatal("key 10002 missing after setting 10001")
}
@@ -41,7 +42,7 @@ func TestIssue52(t *testing.T) {
func TestCompactMap(t *testing.T) {
m := NewCompactMap()
for i := uint32(0); i < 100*batch; i += 2 {
m.Set(NeedleId(i), Offset(i), i)
m.Set(NeedleId(i), ToOffset(int64(i)), i)
}
for i := uint32(0); i < 100*batch; i += 37 {
@@ -49,7 +50,7 @@ func TestCompactMap(t *testing.T) {
}
for i := uint32(0); i < 10*batch; i += 3 {
m.Set(NeedleId(i), Offset(i+11), i+5)
m.Set(NeedleId(i), ToOffset(int64(i+11)), i+5)
}
// for i := uint32(0); i < 100; i++ {
@@ -99,17 +100,17 @@ func TestCompactMap(t *testing.T) {
func TestOverflow(t *testing.T) {
o := Overflow(make([]SectionalNeedleValue, 0))
o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: 12, Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 2, Offset: 12, Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: 12, Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: 12, Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 5, Offset: 12, Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: ToOffset(12), Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 2, Offset: ToOffset(12), Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: ToOffset(12), Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: ToOffset(12), Size: 12})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 5, Offset: ToOffset(12), Size: 12})
if o[2].Key != 3 {
t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key)
}
o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: 24, Size: 24})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 3, Offset: ToOffset(24), Size: 24})
if o[2].Key != 3 {
t.Fatalf("expecting o[2] has key 3: %+v", o[2].Key)
@@ -142,13 +143,13 @@ func TestOverflow(t *testing.T) {
}
println()
o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: 44, Size: 44})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 4, Offset: ToOffset(44), Size: 44})
for i, x := range o {
println("overflow[", i, "]:", x.Key)
}
println()
o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: 11, Size: 11})
o = o.setOverflowEntry(SectionalNeedleValue{Key: 1, Offset: ToOffset(11), Size: 11})
for i, x := range o {
println("overflow[", i, "]:", x.Key)

View file

@@ -68,7 +68,7 @@ func generateBoltDbFile(dbFileName string, indexFile *os.File) error {
}
defer db.Close()
return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
if offset > 0 && size != TombstoneFileSize {
if !offset.IsZero() && size != TombstoneFileSize {
boltDbWrite(db, key, offset, size)
} else {
boltDbDelete(db, key)

View file

@@ -63,7 +63,7 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error {
}
defer db.Close()
return WalkIndexFile(indexFile, func(key NeedleId, offset Offset, size uint32) error {
if offset > 0 && size != TombstoneFileSize {
if !offset.IsZero() && size != TombstoneFileSize {
levelDbWrite(db, key, offset, size)
} else {
levelDbDelete(db, key)

View file

@@ -50,12 +50,12 @@ func doLoading(file *os.File, nm *NeedleMap) (*NeedleMap, error) {
if key > nm.MaximumFileKey {
nm.MaximumFileKey = key
}
if offset > 0 && size != TombstoneFileSize {
if !offset.IsZero() && size != TombstoneFileSize {
nm.FileCounter++
nm.FileByteCounter = nm.FileByteCounter + uint64(size)
oldOffset, oldSize := nm.m.Set(NeedleId(key), offset, size)
// glog.V(3).Infoln("reading key", key, "offset", offset*NeedlePaddingSize, "size", size, "oldSize", oldSize)
if oldOffset > 0 && oldSize != TombstoneFileSize {
if !oldOffset.IsZero() && oldSize != TombstoneFileSize {
nm.DeletionCounter++
nm.DeletionByteCounter = nm.DeletionByteCounter + uint64(oldSize)
}

View file

@@ -7,7 +7,15 @@ import (
"strconv"
)
type Offset uint32
type Offset struct {
// b5 byte // unused
// b4 byte // unused
b3 byte
b2 byte
b1 byte
b0 byte // the smaller byte
}
type Cookie uint32
const (
@@ -41,13 +49,43 @@ func ParseCookie(cookieString string) (Cookie, error) {
}
func OffsetToBytes(bytes []byte, offset Offset) {
util.Uint32toBytes(bytes, uint32(offset))
bytes[3] = offset.b0
bytes[2] = offset.b1
bytes[1] = offset.b2
bytes[0] = offset.b3
}
func Uint32ToOffset(offset uint32) Offset {
return Offset(offset)
return Offset{
b0: byte(offset),
b1: byte(offset >> 8),
b2: byte(offset >> 16),
b3: byte(offset >> 24),
}
}
func BytesToOffset(bytes []byte) Offset {
return Offset(util.BytesToUint32(bytes[0:4]))
return Offset{
b0: bytes[3],
b1: bytes[2],
b2: bytes[1],
b3: bytes[0],
}
}
func (offset Offset) IsZero() bool {
return offset.b0 == 0 && offset.b1 == 0 && offset.b2 == 0 && offset.b3 == 0
}
func ToOffset(offset int64) Offset {
smaller := uint32(offset / int64(NeedlePaddingSize))
return Uint32ToOffset(smaller)
}
func (offset Offset) ToAcutalOffset() (actualOffset int64) {
return (int64(offset.b0) + int64(offset.b1)<<8 + int64(offset.b2)<<16 + int64(offset.b3)<<24) * int64(NeedlePaddingSize)
}
func (offset Offset) String() string {
return fmt.Sprintf("%d", int64(offset.b0)+int64(offset.b1)<<8+int64(offset.b2)<<16+int64(offset.b3)<<24)
}
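
As a sanity check on the helpers above, here is a hedged, self-contained restatement of ToOffset / ToAcutalOffset / IsZero with local names. It assumes NeedlePaddingSize is the 8-byte padding unit, so only offsets that are multiples of that unit round-trip exactly; it mirrors the diff but is not the package code itself.

package main

import "fmt"

const needlePaddingSize = 8 // assumed padding unit, standing in for NeedlePaddingSize

type offset struct{ b3, b2, b1, b0 byte }

// mirrors ToOffset: divide the byte offset by the padding unit, then split into bytes
func toOffset(actual int64) offset {
	u := uint32(actual / needlePaddingSize)
	return offset{b0: byte(u), b1: byte(u >> 8), b2: byte(u >> 16), b3: byte(u >> 24)}
}

// mirrors ToAcutalOffset: reassemble the uint32 and multiply back by the padding unit
func (o offset) toActualOffset() int64 {
	return (int64(o.b0) + int64(o.b1)<<8 + int64(o.b2)<<16 + int64(o.b3)<<24) * needlePaddingSize
}

// mirrors IsZero: the struct zero value replaces the old "offset == 0" / "offset > 0" checks
func (o offset) isZero() bool { return o == offset{} }

func main() {
	for _, actual := range []int64{0, 8, 4096, 1 << 33} {
		o := toOffset(actual)
		fmt.Printf("actual %d -> %+v -> back %d, isZero %v\n", actual, o, o.toActualOffset(), o.isZero())
	}
}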

View file

@@ -26,10 +26,10 @@ func CheckVolumeDataIntegrity(v *Volume, indexFile *os.File) error {
return fmt.Errorf("readLastIndexEntry %s failed: %v", indexFile.Name(), e)
}
key, offset, size := IdxFileEntry(lastIdxEntry)
if offset == 0 || size == TombstoneFileSize {
if offset.IsZero() || size == TombstoneFileSize {
return nil
}
if e = verifyNeedleIntegrity(v.dataFile, v.Version(), int64(offset)*NeedlePaddingSize, key, size); e != nil {
if e = verifyNeedleIntegrity(v.dataFile, v.Version(), offset.ToAcutalOffset(), key, size); e != nil {
return fmt.Errorf("verifyNeedleIntegrity %s failed: %v", indexFile.Name(), e)
}

View file

@@ -110,7 +110,7 @@ func (v *Volume) findLastAppendAtNs() (uint64, error) {
if err != nil {
return 0, err
}
if offset == 0 {
if offset.IsZero() {
return 0, nil
}
return v.readAppendAtNs(offset)
@@ -119,26 +119,26 @@ func (v *Volume) findLastAppendAtNs() (uint64, error) {
func (v *Volume) locateLastAppendEntry() (Offset, error) {
indexFile, e := os.OpenFile(v.FileName()+".idx", os.O_RDONLY, 0644)
if e != nil {
return 0, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e)
return Offset{}, fmt.Errorf("cannot read %s.idx: %v", v.FileName(), e)
}
defer indexFile.Close()
fi, err := indexFile.Stat()
if err != nil {
return 0, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err)
return Offset{}, fmt.Errorf("file %s stat error: %v", indexFile.Name(), err)
}
fileSize := fi.Size()
if fileSize%NeedleEntrySize != 0 {
return 0, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
return Offset{}, fmt.Errorf("unexpected file %s size: %d", indexFile.Name(), fileSize)
}
if fileSize == 0 {
return 0, nil
return Offset{}, nil
}
bytes := make([]byte, NeedleEntrySize)
n, e := indexFile.ReadAt(bytes, fileSize-NeedleEntrySize)
if n != NeedleEntrySize {
return 0, fmt.Errorf("file %s read error: %v", indexFile.Name(), e)
return Offset{}, fmt.Errorf("file %s read error: %v", indexFile.Name(), e)
}
_, offset, _ := IdxFileEntry(bytes)
@@ -147,13 +147,13 @@ func (v *Volume) locateLastAppendEntry() (Offset, error) {
func (v *Volume) readAppendAtNs(offset Offset) (uint64, error) {
n, bodyLength, err := ReadNeedleHeader(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize)
n, bodyLength, err := ReadNeedleHeader(v.dataFile, v.SuperBlock.version, offset.ToAcutalOffset())
if err != nil {
return 0, fmt.Errorf("ReadNeedleHeader: %v", err)
}
err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, int64(offset)*NeedlePaddingSize+int64(NeedleEntrySize), bodyLength)
err = n.ReadNeedleBody(v.dataFile, v.SuperBlock.version, offset.ToAcutalOffset()+int64(NeedleEntrySize), bodyLength)
if err != nil {
return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", int64(offset)*NeedlePaddingSize, bodyLength, err)
return 0, fmt.Errorf("ReadNeedleBody offset %d, bodyLength %d: %v", offset.ToAcutalOffset(), bodyLength, err)
}
return n.AppendAtNs, nil
@@ -189,7 +189,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast
m := (l + h) / 2
if m == entryCount {
return 0, true, nil
return Offset{}, true, nil
}
// read the appendAtNs for entry m
@@ -214,7 +214,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast
}
if l == entryCount {
return 0, true, nil
return Offset{}, true, nil
}
offset, err = v.readAppendAtNsForIndexEntry(indexFile, bytes, l)
@@ -226,7 +226,7 @@ func (v *Volume) BinarySearchByAppendAtNs(sinceNs uint64) (offset Offset, isLast
// bytes is of size NeedleEntrySize
func (v *Volume) readAppendAtNsForIndexEntry(indexFile *os.File, bytes []byte, m int64) (Offset, error) {
if _, readErr := indexFile.ReadAt(bytes, m*NeedleEntrySize); readErr != nil && readErr != io.EOF {
return 0, readErr
return Offset{}, readErr
}
_, offset, _ := IdxFileEntry(bytes)
return offset, nil
@@ -247,7 +247,7 @@ func (scanner *VolumeFileScanner4GenIdx) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4GenIdx) VisitNeedle(n *Needle, offset int64) error {
if n.Size > 0 && n.Size != TombstoneFileSize {
return scanner.v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size)
return scanner.v.nm.Put(n.Id, ToOffset(offset), n.Size)
}
return scanner.v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize))
return scanner.v.nm.Delete(n.Id, ToOffset(offset))
}

View file

@@ -21,9 +21,9 @@ func (v *Volume) isFileUnchanged(n *Needle) bool {
return false
}
nv, ok := v.nm.Get(n.Id)
if ok && nv.Offset > 0 {
if ok && !nv.Offset.IsZero() {
oldNeedle := new(Needle)
err := oldNeedle.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version())
err := oldNeedle.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
if err != nil {
glog.V(0).Infof("Failed to check updated file %v", err)
return false
@@ -96,8 +96,8 @@ func (v *Volume) writeNeedle(n *Needle) (offset uint64, size uint32, err error)
}
nv, ok := v.nm.Get(n.Id)
if !ok || uint64(nv.Offset)*NeedlePaddingSize < offset {
if err = v.nm.Put(n.Id, Offset(offset/NeedlePaddingSize), n.Size); err != nil {
if !ok || uint64(nv.Offset.ToAcutalOffset()) < offset {
if err = v.nm.Put(n.Id, ToOffset(int64(offset)), n.Size); err != nil {
glog.V(4).Infof("failed to save in needle map %d: %v", n.Id, err)
}
}
@@ -124,7 +124,7 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) {
if err != nil {
return size, err
}
if err = v.nm.Delete(n.Id, Offset(offset/NeedlePaddingSize)); err != nil {
if err = v.nm.Delete(n.Id, ToOffset(int64(offset))); err != nil {
return size, err
}
return size, err
@@ -135,10 +135,10 @@ func (v *Volume) deleteNeedle(n *Needle) (uint32, error) {
// read fills in Needle content by looking up n.Id from NeedleMapper
func (v *Volume) readNeedle(n *Needle) (int, error) {
nv, ok := v.nm.Get(n.Id)
if !ok || nv.Offset == 0 {
if !ok || nv.Offset.IsZero() {
v.compactingWg.Wait()
nv, ok = v.nm.Get(n.Id)
if !ok || nv.Offset == 0 {
if !ok || nv.Offset.IsZero() {
return -1, ErrorNotFound
}
}
@@ -148,7 +148,7 @@ func (v *Volume) readNeedle(n *Needle) (int, error) {
if nv.Size == 0 {
return 0, nil
}
err := n.ReadData(v.dataFile, int64(nv.Offset)*NeedlePaddingSize, nv.Size, v.Version())
err := n.ReadData(v.dataFile, nv.Offset.ToAcutalOffset(), nv.Size, v.Version())
if err != nil {
return 0, err
}

View file

@@ -201,13 +201,13 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
}
//updated needle
if increIdxEntry.offset != 0 && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize {
if !increIdxEntry.offset.IsZero() && increIdxEntry.size != 0 && increIdxEntry.size != TombstoneFileSize {
//even the needle cache in memory is hit, the need_bytes is correct
glog.V(4).Infof("file %d offset %d size %d", key, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size)
glog.V(4).Infof("file %d offset %d size %d", key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size)
var needleBytes []byte
needleBytes, err = ReadNeedleBlob(oldDatFile, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size, v.Version())
needleBytes, err = ReadNeedleBlob(oldDatFile, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, v.Version())
if err != nil {
return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, int64(increIdxEntry.offset)*NeedlePaddingSize, increIdxEntry.size, err)
return fmt.Errorf("ReadNeedleBlob %s key %d offset %d size %d failed: %v", oldDatFile.Name(), key, increIdxEntry.offset.ToAcutalOffset(), increIdxEntry.size, err)
}
dst.Write(needleBytes)
util.Uint32toBytes(idxEntryBytes[8:12], uint32(offset/NeedlePaddingSize))
@@ -261,8 +261,8 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *Needle, offset int64) er
}
nv, ok := scanner.v.nm.Get(n.Id)
glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
if ok && int64(nv.Offset)*NeedlePaddingSize == offset && nv.Size > 0 && nv.Size != TombstoneFileSize {
if err := scanner.nm.Put(n.Id, Offset(scanner.newOffset/NeedlePaddingSize), n.Size); err != nil {
if ok && nv.Offset.ToAcutalOffset() == offset && nv.Size > 0 && nv.Size != TombstoneFileSize {
if err := scanner.nm.Put(n.Id, ToOffset(scanner.newOffset), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
}
if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil {
@@ -325,7 +325,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
newOffset := int64(v.SuperBlock.BlockSize())
WalkIndexFile(oldIndexFile, func(key NeedleId, offset Offset, size uint32) error {
if offset == 0 || size == TombstoneFileSize {
if offset.IsZero() || size == TombstoneFileSize {
return nil
}
@@ -335,7 +335,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
}
n := new(Needle)
err := n.ReadData(v.dataFile, int64(offset)*NeedlePaddingSize, size, v.Version())
err := n.ReadData(v.dataFile, offset.ToAcutalOffset(), size, v.Version())
if err != nil {
return nil
}
@@ -346,7 +346,7 @@ func (v *Volume) copyDataBasedOnIndexFile(dstName, idxName string) (err error) {
glog.V(4).Infoln("needle expected offset ", offset, "ok", ok, "nv", nv)
if nv.Offset == offset && nv.Size > 0 {
if err = nm.Put(n.Id, Offset(newOffset/NeedlePaddingSize), n.Size); err != nil {
if err = nm.Put(n.Id, ToOffset(newOffset), n.Size); err != nil {
return fmt.Errorf("cannot put needle: %s", err)
}
if _, _, _, err = n.Append(dst, v.Version()); err != nil {

View file

@@ -1,29 +0,0 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
var (
indexFileName = flag.String("file", "", ".idx file to analyze")
)
func main() {
flag.Parse()
indexFile, err := os.OpenFile(*indexFileName, os.O_RDONLY, 0644)
if err != nil {
log.Fatalf("Create Volume Index [ERROR] %s\n", err)
}
defer indexFile.Close()
storage.WalkIndexFile(indexFile, func(key types.NeedleId, offset types.Offset, size uint32) error {
fmt.Printf("key %d, offset %d, size %d, nextOffset %d\n", key, offset*8, size, int64(offset)*types.NeedlePaddingSize+int64(size))
return nil
})
}