Mirror of https://github.com/chrislusf/seaweedfs
commit b624090398 (parent 215b169562)
Chris Lu 2021-07-01 01:21:14 -07:00
15 changed files with 36 additions and 36 deletions

View file

@@ -2,10 +2,10 @@ package command
import (
"fmt"
- "strings"
- "strconv"
- "time"
"os"
+ "strconv"
+ "strings"
+ "time"
)
func init() {
@@ -13,7 +13,7 @@ func init() {
}
type parameter struct {
- name string
+ name  string
value string
}
@@ -42,7 +42,7 @@ func runFuse(cmd *Command, args []string) bool {
option.Reset()
}
- // dash separator read option until next space
+ // dash separator read option until next space
} else if rawArgs[i] == '-' {
for i++; i < rawArgsLen && rawArgs[i] != ' '; i++ {
option.WriteByte(rawArgs[i])
@@ -50,7 +50,7 @@ func runFuse(cmd *Command, args []string) bool {
options = append(options, parameter{option.String(), "true"})
option.Reset()
- // equal separator start option with pending value
+ // equal separator start option with pending value
} else if rawArgs[i] == '=' {
name := option.String()
option.Reset()
@@ -62,13 +62,13 @@ func runFuse(cmd *Command, args []string) bool {
option.WriteByte(rawArgs[i])
}
- // single quote separator read option until next single quote
+ // single quote separator read option until next single quote
} else if rawArgs[i] == '\'' {
for i++; i < rawArgsLen && rawArgs[i] != '\''; i++ {
option.WriteByte(rawArgs[i])
}
- // add chars before comma
+ // add chars before comma
} else if rawArgs[i] != ' ' {
option.WriteByte(rawArgs[i])
}
@@ -77,12 +77,12 @@ func runFuse(cmd *Command, args []string) bool {
options = append(options, parameter{name, option.String()})
option.Reset()
- // comma separator just read current option
+ // comma separator just read current option
} else if rawArgs[i] == ',' {
options = append(options, parameter{option.String(), "true"})
option.Reset()
- // what is not a separator fill option buffer
+ // what is not a separator fill option buffer
} else {
option.WriteByte(rawArgs[i])
}
@@ -99,7 +99,7 @@ func runFuse(cmd *Command, args []string) bool {
for i := 0; i < len(options); i++ {
parameter := options[i]
- switch parameter.name {
+ switch parameter.name {
case "child":
masterProcess = false
case "arg0":
@@ -198,9 +198,9 @@ func runFuse(cmd *Command, args []string) bool {
arg0 := os.Args[0]
argv := append(os.Args, "-o", "child")
- attr := os.ProcAttr{}
- attr.Env = os.Environ()
+ attr := os.ProcAttr{}
+ attr.Env = os.Environ()
child, err := os.StartProcess(arg0, argv, &attr)
if err != nil {
@@ -232,7 +232,7 @@ func runFuse(cmd *Command, args []string) bool {
var cmdFuse = &Command{
UsageLine: "fuse /mnt/mount/point -o \"filer=localhost:8888,filer.path=/\"",
- Short: "Allow use weed with linux's mount command",
+ Short:     "Allow use weed with linux's mount command",
Long: `Allow use weed with linux's mount command
You can use -t weed on mount command:
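For context on the option parsing that most of these fuse.go hunks reformat: runFuse walks the raw argument string byte by byte and turns it into name/value pairs, treating space, dash, equal sign, single quote and comma as separators. Below is a minimal standalone sketch of that idea, not the actual weed/command/fuse.go code; parseOptions and its behavior are simplified assumptions, shown only to make the separator comments above easier to follow.

// Simplified, hypothetical re-statement of the separator-driven parsing
// described by the comments in the diff above.
package main

import (
    "fmt"
    "strings"
)

type parameter struct {
    name  string
    value string
}

// parseOptions splits a mount-style option string such as
// "filer=localhost:8888,filer.path=/" into name/value pairs.
// Options without '=' get the value "true", mirroring boolean flags.
func parseOptions(raw string) []parameter {
    var options []parameter
    var option strings.Builder
    name := ""
    pendingValue := false

    flush := func() {
        if option.Len() == 0 && name == "" {
            return
        }
        if pendingValue {
            options = append(options, parameter{name, option.String()})
        } else {
            options = append(options, parameter{option.String(), "true"})
        }
        option.Reset()
        name = ""
        pendingValue = false
    }

    for i := 0; i < len(raw); i++ {
        switch raw[i] {
        case '=': // equal separator: start an option with a pending value
            name = option.String()
            option.Reset()
            pendingValue = true
        case '\'': // single quote: read until the closing quote
            for i++; i < len(raw) && raw[i] != '\''; i++ {
                option.WriteByte(raw[i])
            }
        case ',': // comma separator: close out the current option
            flush()
        default: // anything that is not a separator fills the option buffer
            option.WriteByte(raw[i])
        }
    }
    flush()
    return options
}

func main() {
    for _, p := range parseOptions("filer=localhost:8888,filer.path=/,nonempty") {
        fmt.Printf("%s = %s\n", p.name, p.value)
    }
}

Running the sketch prints filer = localhost:8888, filer.path = /, and nonempty = true, which is the shape of data the real command then maps onto mount flags.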

View file

@@ -110,7 +110,7 @@ func runUpload(cmd *Command, args []string) bool {
})
if err != nil {
fmt.Println(err.Error())
- return false;
+ return false
}
} else {
parts, e := operation.NewFileParts(args)

View file

@@ -51,7 +51,7 @@ type VolumeServerOptions struct {
indexType *string
diskType *string
fixJpgOrientation *bool
- readMode *string
+ readMode *string
cpuProfile *string
memProfile *string
compactionMBPerSecond *int

View file

@@ -97,7 +97,7 @@ func (pages *TempFileDirtyPages) saveExistingPagesToStorage() {
for _, list := range pages.writtenIntervals.lists {
listStopOffset := list.Offset() + list.Size()
- for uploadedOffset:=int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
+ for uploadedOffset := int64(0); uploadedOffset < listStopOffset; uploadedOffset += pageSize {
start, stop := max(list.Offset(), uploadedOffset), min(listStopOffset, uploadedOffset+pageSize)
if start >= stop {
continue

View file

@@ -54,7 +54,7 @@ func (list *WrittenIntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.DataOffset), min(stop, t.DataOffset+t.Size)
if nodeStart < nodeStop {
// glog.V(4).Infof("copying start=%d stop=%d t=[%d,%d) => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.DataOffset, t.DataOffset+t.Size, len(buf), nodeStart, nodeStop)
- list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset + nodeStart - t.DataOffset)
+ list.tempFile.ReadAt(buf[nodeStart-start:nodeStop-start], t.TempOffset+nodeStart-t.DataOffset)
}
if t.Next == nil {
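As a side note on what ReadData does here: each written interval covers [DataOffset, DataOffset+Size) of the file, and the copy is clamped to the part that overlaps the requested [start, stop) range. A self-contained illustration of that clamping follows; the numbers are made up for the example, only the max/min pattern matches the hunk above.

package main

import "fmt"

func max(a, b int64) int64 {
    if a > b {
        return a
    }
    return b
}

func min(a, b int64) int64 {
    if a < b {
        return a
    }
    return b
}

func main() {
    start, stop := int64(100), int64(260)      // requested read range [start, stop)
    dataOffset, size := int64(200), int64(128) // one written interval: [200, 328)

    nodeStart := max(start, dataOffset)    // 200: begin where the interval begins
    nodeStop := min(stop, dataOffset+size) // 260: end where the request ends

    if nodeStart < nodeStop {
        // The interval supplies bytes [200,260), which land at buf[100:160].
        fmt.Printf("copy bytes [%d,%d) into buf[%d:%d]\n",
            nodeStart, nodeStop, nodeStart-start, nodeStop-start)
    }
}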

View file

@@ -144,7 +144,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
file.dirtyMetadata = true
}
- if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode){
+ if req.Valid.Mode() && entry.Attributes.FileMode != uint32(req.Mode) {
entry.Attributes.FileMode = uint32(req.Mode)
file.dirtyMetadata = true
}

View file

@@ -30,11 +30,11 @@ import (
_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/notification"
_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"

View file

@@ -143,7 +143,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
maxTimeout = time.Second * 10
startTime = time.Now()
)
for time.Now().Sub(startTime) < maxTimeout {
fid, count, dn, err := ms.Topo.PickForWrite(req.Count, option)
if err == nil {

View file

@@ -97,7 +97,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
ms := &MasterServer{
option: option,
preallocateSize: preallocateSize,
- vgCh: make(chan *topology.VolumeGrowRequest, 1 << 6),
+ vgCh: make(chan *topology.VolumeGrowRequest, 1<<6),
clientChans: make(map[string]chan *master_pb.VolumeLocation),
grpcDialOption: grpcDialOption,
MasterClient: wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers),

View file

@@ -123,7 +123,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
Count: writableVolumeCount,
ErrCh: errCh,
}
- if err := <- errCh; err != nil {
+ if err := <-errCh; err != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("cannot grow volume group! %v", err))
return
}
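The `<-errCh` the hunk reformats is the receiving half of a request/reply handshake: the handler pushes a grow request carrying its own error channel onto a queue and blocks until a worker answers. A generic sketch of that pattern follows; growRequest, worker and the messages are illustrative stand-ins, not the SeaweedFS types.

package main

import (
    "errors"
    "fmt"
)

type growRequest struct {
    Count int
    ErrCh chan error
}

// worker drains the queue and reports the outcome of each request
// back on the request's own error channel.
func worker(reqCh chan *growRequest) {
    for req := range reqCh {
        if req.Count <= 0 {
            req.ErrCh <- errors.New("nothing to grow")
            continue
        }
        // ... grow req.Count volumes here ...
        req.ErrCh <- nil
    }
}

func main() {
    reqCh := make(chan *growRequest, 1<<6) // buffered queue, like vgCh in the hunk above
    go worker(reqCh)

    errCh := make(chan error, 1)
    reqCh <- &growRequest{Count: 2, ErrCh: errCh}

    if err := <-errCh; err != nil {
        fmt.Println("cannot grow volume group:", err)
        return
    }
    fmt.Println("volumes grown")
}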

View file

@@ -28,7 +28,7 @@ type VolumeServer struct {
needleMapKind storage.NeedleMapKind
FixJpgOrientation bool
- ReadMode string
+ ReadMode string
compactionBytePerSecond int64
metricsAddress string
metricsIntervalSec int
@@ -72,7 +72,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
rack: rack,
needleMapKind: needleMapKind,
FixJpgOrientation: fixJpgOrientation,
- ReadMode: readMode,
+ ReadMode: readMode,
grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.volume"),
compactionBytePerSecond: int64(compactionMBPerSecond) * 1024 * 1024,
fileSizeLimitBytes: int64(fileSizeLimitMB) * 1024 * 1024,

View file

@@ -65,7 +65,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
}
lookupResult, err := operation.Lookup(vs.GetMaster, volumeId.String())
glog.V(2).Infoln("volume", volumeId, "found on", lookupResult, "error", err)
- if err != nil || len(lookupResult.Locations) <= 0{
+ if err != nil || len(lookupResult.Locations) <= 0 {
glog.V(0).Infoln("lookup error:", err, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
return

View file

@@ -52,7 +52,7 @@ func (n *Needle) prepareWriteBuffer(version Version, writeBytes *bytes.Buffer) (
writeBytes.Write(n.Data)
padding := PaddingLength(n.Size, version)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
- writeBytes.Write(header[0:NeedleChecksumSize+padding])
+ writeBytes.Write(header[0 : NeedleChecksumSize+padding])
return size, actualSize, nil
case Version2, Version3:
header := make([]byte, NeedleHeaderSize+TimestampSize) // adding timestamp to reuse it and avoid extra allocation
@@ -104,7 +104,7 @@ func (n *Needle) prepareWriteBuffer(version Version, writeBytes *bytes.Buffer) (
}
if n.HasLastModifiedDate() {
util.Uint64toBytes(header[0:8], n.LastModified)
- writeBytes.Write(header[8-LastModifiedBytesLength:8])
+ writeBytes.Write(header[8-LastModifiedBytesLength : 8])
}
if n.HasTtl() && n.Ttl != nil {
n.Ttl.ToBytes(header[0:TtlBytesLength])
@@ -119,11 +119,11 @@ func (n *Needle) prepareWriteBuffer(version Version, writeBytes *bytes.Buffer) (
padding := PaddingLength(n.Size, version)
util.Uint32toBytes(header[0:NeedleChecksumSize], n.Checksum.Value())
if version == Version2 {
- writeBytes.Write(header[0:NeedleChecksumSize+padding])
+ writeBytes.Write(header[0 : NeedleChecksumSize+padding])
} else {
// version3
util.Uint64toBytes(header[NeedleChecksumSize:NeedleChecksumSize+TimestampSize], n.AppendAtNs)
- writeBytes.Write(header[0:NeedleChecksumSize+TimestampSize+padding])
+ writeBytes.Write(header[0 : NeedleChecksumSize+TimestampSize+padding])
}
return Size(n.DataSize), GetActualSize(n.Size, version), nil
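All the slice-bound edits in this file follow one gofmt rule: when an index of a slice expression is itself a binary expression (such as NeedleChecksumSize+padding), gofmt puts spaces around the colon; with simple operands it keeps the compact form. A tiny example of how gofmt renders both cases (buffer and names are made up for illustration):

package main

import "fmt"

func main() {
    buf := []byte("0123456789")
    n := 3

    fmt.Println(string(buf[0:n]))     // simple operands: gofmt keeps buf[0:n]
    fmt.Println(string(buf[0 : n+2])) // binary-expression operand: gofmt writes buf[0 : n+2]
}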

View file

@@ -243,7 +243,7 @@ func (n *NodeImpl) CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSi
if v.Size >= volumeSizeLimit {
//fmt.Println("volume",v.Id,"size",v.Size,">",volumeSizeLimit)
n.GetTopology().chanFullVolumes <- v
- }else if float64(v.Size) > float64(volumeSizeLimit) * growThreshold {
+ } else if float64(v.Size) > float64(volumeSizeLimit)*growThreshold {
n.GetTopology().chanCrowdedVolumes <- v
}
}
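The branch being reformatted here implements a two-level threshold: a volume at or above volumeSizeLimit is reported as full, and one above volumeSizeLimit*growThreshold is reported as crowded. A small self-contained illustration follows; the limit and threshold values are example numbers, not SeaweedFS defaults.

package main

import "fmt"

// classify mirrors the two checks in the hunk above: "full" at the size
// limit, "crowded" once the size crosses limit*growThreshold.
func classify(size, limit uint64, growThreshold float64) string {
    if size >= limit {
        return "full"
    } else if float64(size) > float64(limit)*growThreshold {
        return "crowded"
    }
    return "ok"
}

func main() {
    const limit = 30 << 30 // 30 GiB volume size limit, chosen for the example
    fmt.Println(classify(31<<30, limit, 0.9)) // full
    fmt.Println(classify(28<<30, limit, 0.9)) // crowded: above 27 GiB (0.9 * 30 GiB)
    fmt.Println(classify(10<<30, limit, 0.9)) // ok
}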

View file

@@ -21,21 +21,21 @@ func SetupProfiling(cpuProfile, memProfile string) {
pprof.StopCPUProfile()
// write block pprof
- blockF, err := os.Create(cpuProfile+".block")
+ blockF, err := os.Create(cpuProfile + ".block")
if err != nil {
return
}
p := pprof.Lookup("block")
- p.WriteTo(blockF,0)
+ p.WriteTo(blockF, 0)
blockF.Close()
// write mutex pprof
- mutexF, err := os.Create(cpuProfile+".mutex")
+ mutexF, err := os.Create(cpuProfile + ".mutex")
if err != nil {
return
}
p = pprof.Lookup("mutex")
- p.WriteTo(mutexF,0)
+ p.WriteTo(mutexF, 0)
mutexF.Close()
})
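The block and mutex profiles written above only contain samples if their collection was switched on earlier in the process via runtime.SetBlockProfileRate and runtime.SetMutexProfileFraction. A minimal standalone sketch of that whole sequence, using an illustrative output file name rather than the cpuProfile-derived names in the diff:

package main

import (
    "os"
    "runtime"
    "runtime/pprof"
)

func main() {
    runtime.SetBlockProfileRate(1)     // sample every blocking event
    runtime.SetMutexProfileFraction(1) // sample every mutex contention event

    // ... run the workload to be profiled here ...

    blockF, err := os.Create("demo.block")
    if err != nil {
        return
    }
    defer blockF.Close()
    pprof.Lookup("block").WriteTo(blockF, 0)

    mutexF, err := os.Create("demo.mutex")
    if err != nil {
        return
    }
    defer mutexF.Close()
    pprof.Lookup("mutex").WriteTo(mutexF, 0)
}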