
volume: add option to limit compaction speed

Chris Lu 2019-05-03 17:22:39 -07:00
parent f0f981e7c8
commit b335f81a4f
9 changed files with 69 additions and 33 deletions
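In short, the commit threads a per-second byte budget from the command-line flags (volume.compactionMBps on the combined server, compactionMBps on the volume server) through VolumeServer and Store down to Volume.Compact, whose signature gains a second argument; the actual throttling happens in the vacuum scanner. A minimal caller-side sketch of the extended API, assuming a *storage.Volume that has already been loaded (the variable names are illustrative, not part of the commit):

    preallocate := int64(0)
    limitBytesPerSecond := int64(50) * 1024 * 1024 // cap compaction at ~50 MB/s
    if err := v.Compact(preallocate, limitBytesPerSecond); err != nil {
        glog.Fatalf("Compact Volume [ERROR] %s\n", err)
    }

Passing 0 for the second argument keeps the previous unthrottled behavior, which is also the flag default.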


@@ -91,7 +91,7 @@ func runBackup(cmd *Command, args []string) bool {
     }
     if v.SuperBlock.CompactionRevision < uint16(stats.CompactRevision) {
-        if err = v.Compact(0); err != nil {
+        if err = v.Compact(0, 0); err != nil {
             fmt.Printf("Compact Volume before synchronizing %v\n", err)
             return true
         }


@@ -43,7 +43,7 @@ func runCompact(cmd *Command, args []string) bool {
         glog.Fatalf("Load Volume [ERROR] %s\n", err)
     }
     if *compactMethod == 0 {
-        if err = v.Compact(preallocate); err != nil {
+        if err = v.Compact(preallocate, 0); err != nil {
             glog.Fatalf("Compact Volume [ERROR] %s\n", err)
         }
     } else {


@@ -94,6 +94,7 @@ func init() {
     serverOptions.v.indexType = cmdServer.Flag.String("volume.index", "memory", "Choose [memory|leveldb|leveldbMedium|leveldbLarge] mode for memory~performance balance.")
     serverOptions.v.fixJpgOrientation = cmdServer.Flag.Bool("volume.images.fix.orientation", false, "Adjust jpg orientation when uploading.")
     serverOptions.v.readRedirect = cmdServer.Flag.Bool("volume.read.redirect", true, "Redirect moved or non-local volumes.")
+    serverOptions.v.compactionMBPerSecond = cmdServer.Flag.Int("volume.compactionMBps", 0, "limit compaction speed in mega bytes per second")
     serverOptions.v.publicUrl = cmdServer.Flag.String("volume.publicUrl", "", "publicly accessible address")
     s3Options.filerBucketsPath = cmdServer.Flag.String("s3.filer.dir.buckets", "/buckets", "folder on filer to store all buckets")


@@ -43,6 +43,7 @@ type VolumeServerOptions struct {
     readRedirect          *bool
     cpuProfile            *string
     memProfile            *string
+    compactionMBPerSecond *int
 }
 func init() {
@@ -63,6 +64,7 @@ func init() {
     v.readRedirect = cmdVolume.Flag.Bool("read.redirect", true, "Redirect moved or non-local volumes.")
     v.cpuProfile = cmdVolume.Flag.String("cpuprofile", "", "cpu profile output file")
     v.memProfile = cmdVolume.Flag.String("memprofile", "", "memory profile output file")
+    v.compactionMBPerSecond = cmdVolume.Flag.Int("compactionMBps", 0, "limit compaction speed in mega bytes per second")
 }
 var cmdVolume = &Command{
@@ -157,6 +159,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
         strings.Split(masters, ","), *v.pulseSeconds, *v.dataCenter, *v.rack,
         v.whiteList,
         *v.fixJpgOrientation, *v.readRedirect,
+        *v.compactionMBPerSecond,
     )
     listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
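With this wiring in place, the limit is presumably set at startup, e.g. `weed volume -compactionMBps=50` or `weed server -volume.compactionMBps=50` (binary and subcommand names assumed from the usual SeaweedFS CLI layout, not shown in this diff); the default of 0 leaves compaction unthrottled.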


@@ -28,7 +28,7 @@ func (vs *VolumeServer) VacuumVolumeCompact(ctx context.Context, req *volume_ser
     resp := &volume_server_pb.VacuumVolumeCompactResponse{}
-    err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate)
+    err := vs.store.CompactVolume(needle.VolumeId(req.VolumeId), req.Preallocate, vs.compactionBytePerSecond)
     if err != nil {
         glog.Errorf("compact volume %d: %v", req.VolumeId, err)


@@ -20,9 +20,10 @@ type VolumeServer struct {
     guard          *security.Guard
     grpcDialOption grpc.DialOption
     needleMapKind           storage.NeedleMapType
     FixJpgOrientation       bool
     ReadRedirect            bool
+    compactionBytePerSecond int64
 }
 func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
@@ -33,20 +34,23 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
     dataCenter string, rack string,
     whiteList []string,
     fixJpgOrientation bool,
-    readRedirect bool) *VolumeServer {
+    readRedirect bool,
+    compactionMBPerSecond int,
+) *VolumeServer {
     v := viper.GetViper()
     signingKey := v.GetString("jwt.signing.key")
     enableUiAccess := v.GetBool("access.ui")
     vs := &VolumeServer{
         pulseSeconds:            pulseSeconds,
         dataCenter:              dataCenter,
         rack:                    rack,
         needleMapKind:           needleMapKind,
         FixJpgOrientation:       fixJpgOrientation,
         ReadRedirect:            readRedirect,
         grpcDialOption:          security.LoadClientTLS(viper.Sub("grpc"), "volume"),
+        compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024,
     }
     vs.MasterNodes = masterNodes
     vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)


@@ -14,9 +14,9 @@ func (s *Store) CheckCompactVolume(volumeId needle.VolumeId) (float64, error) {
     }
     return 0, fmt.Errorf("volume id %d is not found during check compact", volumeId)
 }
-func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64) error {
+func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compactionBytePerSecond int64) error {
     if v := s.findVolume(vid); v != nil {
-        return v.Compact(preallocate)
+        return v.Compact(preallocate, compactionBytePerSecond)
     }
     return fmt.Errorf("volume id %d is not found during compact", vid)
 }


@@ -18,7 +18,7 @@ func (v *Volume) garbageLevel() float64 {
     return float64(v.nm.DeletedSize()) / float64(v.ContentSize())
 }
-func (v *Volume) Compact(preallocate int64) error {
+func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error {
     glog.V(3).Infof("Compacting volume %d ...", v.Id)
     //no need to lock for copy on write
     //v.accessLock.Lock()
@@ -29,7 +29,7 @@ func (v *Volume) Compact(preallocate int64) error {
     v.lastCompactIndexOffset = v.nm.IndexFileSize()
     v.lastCompactRevision = v.SuperBlock.CompactionRevision
     glog.V(3).Infof("creating copies for volume %d ,last offset %d...", v.Id, v.lastCompactIndexOffset)
-    return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate)
+    return v.copyDataAndGenerateIndexFile(filePath+".cpd", filePath+".cpx", preallocate, compactionBytePerSecond)
 }
 func (v *Volume) Compact2() error {
@@ -236,12 +236,15 @@ func (v *Volume) makeupDiff(newDatFileName, newIdxFileName, oldDatFileName, oldI
 }
 type VolumeFileScanner4Vacuum struct {
     version                 needle.Version
     v                       *Volume
     dst                     *os.File
     nm                      *NeedleMap
     newOffset               int64
     now                     uint64
+    compactionBytePerSecond int64
+    lastSizeCounter         int64
+    lastSizeCheckTime       time.Time
 }
 func (scanner *VolumeFileScanner4Vacuum) VisitSuperBlock(superBlock SuperBlock) error {
@@ -269,13 +272,32 @@ func (scanner *VolumeFileScanner4Vacuum) VisitNeedle(n *needle.Needle, offset in
         if _, _, _, err := n.Append(scanner.dst, scanner.v.Version()); err != nil {
             return fmt.Errorf("cannot append needle: %s", err)
         }
-        scanner.newOffset += n.DiskSize(scanner.version)
+        delta := n.DiskSize(scanner.version)
+        scanner.newOffset += delta
+        scanner.maybeSlowdown(delta)
         glog.V(4).Infoln("saving key", n.Id, "volume offset", offset, "=>", scanner.newOffset, "data_size", n.Size)
     }
     return nil
 }
+func (scanner *VolumeFileScanner4Vacuum) maybeSlowdown(delta int64) {
+    if scanner.compactionBytePerSecond > 0 {
+        scanner.lastSizeCounter += delta
+        now := time.Now()
+        elapsedDuration := now.Sub(scanner.lastSizeCheckTime)
+        if elapsedDuration > 100*time.Millisecond {
+            overLimitBytes := scanner.lastSizeCounter - scanner.compactionBytePerSecond/10
+            if overLimitBytes > 0 {
+                overRatio := float64(overLimitBytes) / float64(scanner.compactionBytePerSecond)
+                sleepTime := time.Duration(overRatio*1000) * time.Millisecond
+                // glog.V(0).Infof("currently %d bytes, limit to %d bytes, over by %d bytes, sleeping %v over %.4f", scanner.lastSizeCounter, scanner.compactionBytePerSecond/10, overLimitBytes, sleepTime, overRatio)
+                time.Sleep(sleepTime)
+            }
+            scanner.lastSizeCounter, scanner.lastSizeCheckTime = 0, time.Now()
+        }
+    }
+}
-func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64) (err error) {
+func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, preallocate int64, compactionBytePerSecond int64) (err error) {
     var (
         dst, idx *os.File
     )
@@ -290,10 +312,12 @@ func (v *Volume) copyDataAndGenerateIndexFile(dstName, idxName string, prealloca
     defer idx.Close()
     scanner := &VolumeFileScanner4Vacuum{
         v:                       v,
        now:                     uint64(time.Now().Unix()),
         nm:                      NewBtreeNeedleMap(idx),
         dst:                     dst,
+        compactionBytePerSecond: compactionBytePerSecond,
+        lastSizeCheckTime:       time.Now(),
     }
     err = ScanVolumeFile(v.dir, v.Collection, v.Id, v.needleMapKind, scanner)
     return
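The throttle in maybeSlowdown is a simple fixed-window limiter: bytes written are accumulated, and roughly every 100 ms the counter is compared against one tenth of the per-second budget; any excess is turned into a proportional sleep before the window resets. A self-contained sketch of the same idea, runnable outside SeaweedFS (type and function names here are illustrative, not the commit's API):

    package main

    import (
        "fmt"
        "time"
    )

    // byteThrottle keeps a writer's long-run throughput near bytesPerSecond,
    // mirroring the fixed-window approach of VolumeFileScanner4Vacuum.maybeSlowdown.
    type byteThrottle struct {
        bytesPerSecond int64
        counter        int64
        lastCheck      time.Time
    }

    func newByteThrottle(bytesPerSecond int64) *byteThrottle {
        return &byteThrottle{bytesPerSecond: bytesPerSecond, lastCheck: time.Now()}
    }

    // add records n freshly written bytes and sleeps when the current
    // ~100 ms window has already used more than its share of the budget.
    func (t *byteThrottle) add(n int64) {
        if t.bytesPerSecond <= 0 {
            return // 0 means unlimited, matching the flag default
        }
        t.counter += n
        if time.Since(t.lastCheck) > 100*time.Millisecond {
            over := t.counter - t.bytesPerSecond/10
            if over > 0 {
                // sleep long enough for the excess bytes to fit the per-second budget
                ratio := float64(over) / float64(t.bytesPerSecond)
                time.Sleep(time.Duration(ratio*1000) * time.Millisecond)
            }
            t.counter, t.lastCheck = 0, time.Now()
        }
    }

    func main() {
        th := newByteThrottle(4 * 1024 * 1024) // ~4 MB/s
        start := time.Now()
        var written int64
        for written < 8*1024*1024 { // pretend to copy 8 MB in 64 KB chunks
            chunk := int64(64 * 1024)
            written += chunk
            th.add(chunk)
        }
        fmt.Printf("copied %d bytes in %v\n", written, time.Since(start))
    }

The same caveat applies as in the original: the sleep only corrects for the window that just finished, so short bursts above the limit are possible, but the average rate converges on the configured budget.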


@@ -5,6 +5,7 @@ import (
     "math/rand"
     "os"
     "testing"
+    "time"
     "github.com/chrislusf/seaweedfs/weed/storage/needle"
     "github.com/chrislusf/seaweedfs/weed/storage/types"
@@ -72,8 +73,8 @@ func TestCompaction(t *testing.T) {
         t.Fatalf("volume creation: %v", err)
     }
-    beforeCommitFileCount := 1000
-    afterCommitFileCount := 1000
+    beforeCommitFileCount := 10000
+    afterCommitFileCount := 10000
     infos := make([]*needleInfo, beforeCommitFileCount+afterCommitFileCount)
@@ -81,7 +82,10 @@ func TestCompaction(t *testing.T) {
         doSomeWritesDeletes(i, v, t, infos)
     }
-    v.Compact(0)
+    startTime := time.Now()
+    v.Compact(0, 1024*1024)
+    speed := float64(v.ContentSize()) / time.Now().Sub(startTime).Seconds()
+    t.Logf("compaction speed: %.2f bytes/s", speed)
     for i := 1; i <= afterCommitFileCount; i++ {
         doSomeWritesDeletes(i+beforeCommitFileCount, v, t, infos)