From b9256e0b34fcd8ae62b89d1df5b61b7c35e204b4 Mon Sep 17 00:00:00 2001
From: guol-fnst
Date: Tue, 19 Jul 2022 13:17:52 +0800
Subject: [PATCH] optimize

---
 weed/storage/disk_location.go      |  7 ++---
 weed/storage/idx/walk.go           |  3 +-
 weed/storage/needle_map_leveldb.go | 46 +++++++++++-------------------
 3 files changed, 20 insertions(+), 36 deletions(-)

diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index d72d83208..8af8ea663 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -123,7 +123,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne
 	if volumeName == "" {
 		return false
 	}
-	glog.V(0).Infof("data file %s", l.Directory+"/"+volumeName)
+
 	// skip if ec volumes exists
 	if skipIfEcVolumesExists {
 		if util.FileExists(l.Directory + "/" + volumeName + ".ecx") {
@@ -147,7 +147,7 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne
 		glog.Warningf("get volume id failed, %s, err : %s", volumeName, err)
 		return false
 	}
-	glog.V(0).Infof("data file %s", l.Directory+"/"+volumeName)
+
 	// avoid loading one volume more than once
 	l.volumesLock.RLock()
 	_, found := l.volumes[vid]
@@ -156,7 +156,6 @@ func (l *DiskLocation) loadExistingVolume(dirEntry os.DirEntry, needleMapKind Ne
 		glog.V(1).Infof("loaded volume, %v", vid)
 		return true
 	}
-	glog.V(0).Infof("data file %s", l.Directory+"/"+volumeName)
 
 	// load the volume
 	v, e := NewVolume(l.Directory, l.IdxDirectory, collection, vid, needleMapKind, nil, nil, 0, 0)
@@ -223,8 +222,6 @@ func (l *DiskLocation) loadExistingVolumes(needleMapKind NeedleMapKind) {
 			workerNum = 10
 		}
 	}
-	workerNum = 10
-
 	l.concurrentLoadingVolumes(needleMapKind, workerNum)
 
 	glog.V(0).Infof("Store started on dir: %s with %d volumes max %d", l.Directory, len(l.volumes), l.MaxVolumeCount)
diff --git a/weed/storage/idx/walk.go b/weed/storage/idx/walk.go
index ef177ec2f..74cb83b45 100644
--- a/weed/storage/idx/walk.go
+++ b/weed/storage/idx/walk.go
@@ -42,7 +42,8 @@ func WalkIndexFile(r io.ReaderAt, fn func(key types.NeedleId, offset types.Offse
 	return e
 }
 
-func WalkIndexFileIncrent(r io.ReaderAt, milestone uint64, fn func(key types.NeedleId, offset types.Offset, size types.Size) error) error {
+//copied from WalkIndexFile, just init readerOffset from milestone
+func WalkIndexFileIncrement(r io.ReaderAt, milestone uint64, fn func(key types.NeedleId, offset types.Offset, size types.Size) error) error {
 	var readerOffset = int64(milestone * types.NeedleMapEntrySize)
 	bytes := make([]byte, types.NeedleMapEntrySize*RowsToRead)
 	count, e := r.ReadAt(bytes, readerOffset)
diff --git a/weed/storage/needle_map_leveldb.go b/weed/storage/needle_map_leveldb.go
index f014797df..c26856ba3 100644
--- a/weed/storage/needle_map_leveldb.go
+++ b/weed/storage/needle_map_leveldb.go
@@ -20,8 +20,8 @@ import (
 	. "github.com/chrislusf/seaweedfs/weed/storage/types"
"github.com/chrislusf/seaweedfs/weed/storage/types" ) -//use "2 >> 16" to reduce cpu cost -const milestoneCnt = 40 +//mark it every milestoneCnt operations +const milestoneCnt = 10000 const milestoneKey = 0xffffffffffffffff - 1 type LevelDbNeedleMap struct { @@ -32,24 +32,19 @@ type LevelDbNeedleMap struct { } func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Options) (m *LevelDbNeedleMap, err error) { - glog.V(0).Infof("NewLevelDbNeedleMap pocessing %s...", indexFile.Name()) - db, errd := leveldb.OpenFile(dbFileName, opts) - glog.V(0).Infof("begain %v %s %d", errd, dbFileName, getMileStone(db)) - db.Close() - m = &LevelDbNeedleMap{dbFileName: dbFileName} m.indexFile = indexFile if !isLevelDbFresh(dbFileName, indexFile) { - glog.V(0).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) + glog.V(1).Infof("Start to Generate %s from %s", dbFileName, indexFile.Name()) generateLevelDbFile(dbFileName, indexFile) - glog.V(0).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) + glog.V(1).Infof("Finished Generating %s from %s", dbFileName, indexFile.Name()) } if stat, err := indexFile.Stat(); err != nil { glog.Fatalf("stat file %s: %v", indexFile.Name(), err) } else { m.indexFileOffset = stat.Size() } - glog.V(0).Infof("Opening %s...", dbFileName) + glog.V(1).Infof("Opening %s...", dbFileName) if m.db, err = leveldb.OpenFile(dbFileName, opts); err != nil { if errors.IsCorrupted(err) { @@ -59,19 +54,18 @@ func NewLevelDbNeedleMap(dbFileName string, indexFile *os.File, opts *opt.Option return } } - glog.V(0).Infof("getMileStone %s : %d", dbFileName, getMileStone(m.db)) + glog.V(1).Infof("Loading %s... , milestone: %d", dbFileName, getMileStone(m.db)) m.recordNum = uint64(m.indexFileOffset / types.NeedleMapEntrySize) milestone := (m.recordNum / milestoneCnt) * milestoneCnt err = setMileStone(m.db, milestone) if err != nil { + glog.Fatalf("set milestone for %s error: %s\n", dbFileName, err) return } - glog.V(0).Infof("Loading %s... 
%d %d", indexFile.Name(), milestone, getMileStone(m.db)) mm, indexLoadError := newNeedleMapMetricFromIndexFile(indexFile) if indexLoadError != nil { return nil, indexLoadError } - glog.V(0).Infof("finish Loading %s...", indexFile.Name()) m.mapMetric = *mm return } @@ -106,14 +100,13 @@ func generateLevelDbFile(dbFileName string, indexFile *os.File) error { return err } else { if milestone*types.NeedleMapEntrySize > uint64(stat.Size()) { - glog.Warningf("wrong milestone %d for filesize %d, set milestone to 0", milestone, stat.Size()) - milestone = 0 + glog.Warningf("wrong milestone %d for filesize %d", milestone, stat.Size()) } glog.V(0).Infof("generateLevelDbFile %s, milestone %d, num of entries:%d", dbFileName, milestone, (uint64(stat.Size())-milestone*types.NeedleMapEntrySize)/types.NeedleMapEntrySize) } - return idx.WalkIndexFileIncrent(indexFile, milestone, func(key NeedleId, offset Offset, size Size) error { + return idx.WalkIndexFileIncrement(indexFile, milestone, func(key NeedleId, offset Offset, size Size) error { if !offset.IsZero() && size.IsValid() { - levelDbWrite(db, key, offset, size, 0) + levelDbWrite(db, key, offset, size, false, 0) } else { levelDbDelete(db, key) } @@ -144,16 +137,14 @@ func (m *LevelDbNeedleMap) Put(key NeedleId, offset Offset, size Size) error { if err := m.appendToIndexFile(key, offset, size); err != nil { return fmt.Errorf("cannot write to indexfile %s: %v", m.indexFile.Name(), err) } - //atomic.AddUint64(&m.recordNum, 1) - //milestone = atomic.LoadUint64(&m.recordNum) m.recordNum++ if m.recordNum%milestoneCnt != 0 { milestone = 0 } else { milestone = (m.recordNum / milestoneCnt) * milestoneCnt - glog.V(0).Infof("put cnt:%d milestone:%s %d", m.recordNum, m.dbFileName, milestone) + glog.V(1).Infof("put cnt:%d for %s,milestone: %d", m.recordNum, m.dbFileName, milestone) } - return levelDbWrite(m.db, key, offset, size, milestone) + return levelDbWrite(m.db, key, offset, size, milestone == 0, milestone) } func getMileStone(db *leveldb.DB) uint64 { @@ -175,7 +166,7 @@ func getMileStone(db *leveldb.DB) uint64 { } func setMileStone(db *leveldb.DB, milestone uint64) error { - glog.V(0).Infof("set milestone %d", milestone) + glog.V(1).Infof("set milestone %d", milestone) var mskBytes = make([]byte, 8) util.Uint64toBytes(mskBytes, milestoneKey) var msBytes = make([]byte, 8) @@ -183,11 +174,10 @@ func setMileStone(db *leveldb.DB, milestone uint64) error { if err := db.Put(mskBytes, msBytes, nil); err != nil { return fmt.Errorf("failed to setMileStone: %v", err) } - glog.V(0).Infof("ssset milestone %d, %d", milestone, getMileStone(db)) return nil } -func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, milestone uint64) error { +func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, upateMilstone bool, milestone uint64) error { bytes := needle_map.ToBytes(key, offset, size) @@ -195,8 +185,7 @@ func levelDbWrite(db *leveldb.DB, key NeedleId, offset Offset, size Size, milest return fmt.Errorf("failed to write leveldb: %v", err) } // set milestone - if milestone != 0 { - glog.V(0).Infof("actually set milestone %d", milestone) + if upateMilstone { return setMileStone(db, milestone) } return nil @@ -219,16 +208,13 @@ func (m *LevelDbNeedleMap) Delete(key NeedleId, offset Offset) error { if err := m.appendToIndexFile(key, offset, TombstoneFileSize); err != nil { return err } - //atomic.AddUint64(&m.recordNum, 1) - //milestone = atomic.LoadUint64(&m.recordNum) m.recordNum++ if m.recordNum%milestoneCnt != 0 { milestone = 0 } else { 
 		milestone = (m.recordNum / milestoneCnt) * milestoneCnt
 	}
-	glog.V(0).Infof("delete cnt:%d milestone:%s %d", m.recordNum, m.dbFileName, milestone)
-	return levelDbWrite(m.db, key, oldNeedle.Offset, -oldNeedle.Size, milestone)
+	return levelDbWrite(m.db, key, oldNeedle.Offset, -oldNeedle.Size, milestone != 0, milestone)
 }
 
 func (m *LevelDbNeedleMap) Close() {
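
The patch persists a rebuild checkpoint (the "milestone") inside the same LevelDB that holds the needle index: every milestoneCnt (10000) put/delete operations the current record count is written under the reserved key milestoneKey, and on startup generateLevelDbFile reads it back and replays only the tail of the .idx file via WalkIndexFileIncrement instead of the whole file. The sketch below illustrates that idea in isolation; it is not SeaweedFS code, and the names entrySize, saveMilestone, loadMilestone and resumeWalk are hypothetical stand-ins for the patch's setMileStone, getMileStone and WalkIndexFileIncrement.

// milestone_sketch.go - standalone illustration with assumed names only.
package milestonesketch

import (
	"encoding/binary"
	"io"

	"github.com/syndtr/goleveldb/leveldb"
)

// entrySize is the fixed size of one index entry (illustrative; SeaweedFS
// uses types.NeedleMapEntrySize).
const entrySize = 16

// milestoneKey is a reserved key that real record keys can never collide with.
var milestoneKey = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe}

// saveMilestone records how many index entries have already been applied.
func saveMilestone(db *leveldb.DB, n uint64) error {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, n)
	return db.Put(milestoneKey, buf, nil)
}

// loadMilestone returns 0 when no checkpoint has been written yet.
func loadMilestone(db *leveldb.DB) uint64 {
	data, err := db.Get(milestoneKey, nil)
	if err != nil || len(data) != 8 {
		return 0
	}
	return binary.BigEndian.Uint64(data)
}

// resumeWalk replays index entries starting at the saved checkpoint and
// persists a new checkpoint every batch entries (batch must be > 0; the
// patch uses 10000).
func resumeWalk(db *leveldb.DB, idx io.ReaderAt, batch uint64, fn func(entry []byte) error) error {
	n := loadMilestone(db)
	buf := make([]byte, entrySize)
	for {
		readN, err := idx.ReadAt(buf, int64(n)*entrySize)
		if readN < entrySize {
			if err == io.EOF || err == nil {
				// reached the end of the index file; checkpoint and stop
				return saveMilestone(db, n)
			}
			return err
		}
		if applyErr := fn(buf); applyErr != nil {
			return applyErr
		}
		n++
		if n%batch == 0 {
			if err := saveMilestone(db, n); err != nil {
				return err
			}
		}
	}
}

Writing the checkpoint only every batch operations bounds the extra LevelDB writes, at the cost of replaying at most batch trailing index entries after a restart.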