switch to logrus

Plain logrus loses the caller filename and line number that glog prints, which are critical for debugging; the banzaicloud logrus-runtime-formatter dependency added below is there to preserve them.
Chris Lu 2020-11-16 22:26:58 -08:00
parent 9add554feb
commit 6c9156b25f
205 changed files with 1340 additions and 1246 deletions
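
Every file below swaps the import github.com/chrislusf/seaweedfs/weed/glog for github.com/chrislusf/seaweedfs/weed/util/log, but the wrapper package itself is outside this diff. A minimal sketch of what such a wrapper might look like, wired to the runtime formatter this commit adds to go.mod; the exported names mirror the call sites in the diff (Infof, Infoln, Debugf, Tracef, Warnf, Errorf, Fatalf, Fatal), while the formatter and level configuration are assumptions:

package log

import (
	runtime "github.com/banzaicloud/logrus-runtime-formatter"
	"github.com/sirupsen/logrus"
)

var logger = logrus.New()

func init() {
	// The runtime formatter wraps a child formatter and injects the calling
	// function, file, and line into every entry, i.e. the context that glog
	// printed by default and that plain logrus drops.
	logger.SetFormatter(&runtime.Formatter{
		ChildFormatter: &logrus.TextFormatter{},
		File:           true,
		Line:           true,
	})
	// Assumed default; the real package may derive the level from a flag.
	logger.SetLevel(logrus.TraceLevel)
}

// Thin pass-throughs so call sites read like the old glog ones. Note that a
// pass-through layer adds a stack frame, so the formatter may report this
// file as the caller unless the wrapper compensates for the extra depth.
func Infof(format string, args ...interface{})  { logger.Infof(format, args...) }
func Infoln(args ...interface{})                { logger.Infoln(args...) }
func Debugf(format string, args ...interface{}) { logger.Debugf(format, args...) }
func Tracef(format string, args ...interface{}) { logger.Tracef(format, args...) }
func Warnf(format string, args ...interface{})  { logger.Warnf(format, args...) }
func Errorf(format string, args ...interface{}) { logger.Errorf(format, args...) }
func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) }
func Fatal(args ...interface{})                 { logger.Fatal(args...) }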

go.mod

@ -9,6 +9,7 @@ require (
github.com/OneOfOne/xxhash v1.2.2
github.com/Shopify/sarama v1.23.1
github.com/aws/aws-sdk-go v1.33.5
github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e
github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
github.com/cespare/xxhash v1.1.0
github.com/chrislusf/raft v1.0.3
@ -60,6 +61,7 @@ require (
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
github.com/seaweedfs/fuse v1.0.7
github.com/seaweedfs/goexif v1.0.2
github.com/sirupsen/logrus v1.4.2
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect

go.sum

@ -62,6 +62,8 @@ github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
github.com/aws/aws-sdk-go v1.33.5 h1:p2fr1ryvNTU6avUWLI+/H7FGv0TBIjzVM5WDgXBBv4U=
github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e h1:ZOnKnYG1LLgq4W7wZUYj9ntn3RxQ65EZyYqdtFpP2Dw=
github.com/banzaicloud/logrus-runtime-formatter v0.0.0-20190729070250-5ae5475bae5e/go.mod h1:hEvEpPmuwKO+0TbrDQKIkmX0gW2s2waZHF8pIhEEmpM=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
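
These pins are the substance of the dependency change: sirupsen/logrus supplies the logger, and banzaicloud/logrus-runtime-formatter re-attaches the caller location. A standalone sketch (not part of this commit) showing the formatter's effect when File and Line are enabled; the sample output line is approximate:

package main

import (
	runtime "github.com/banzaicloud/logrus-runtime-formatter"
	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetFormatter(&runtime.Formatter{
		ChildFormatter: &logrus.TextFormatter{},
		File:           true,
		Line:           true,
	})
	// Prints something like:
	//   INFO[0000] hello  file=main.go function=main line=17
	logrus.Info("hello")
}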

(next changed file)

@ -7,7 +7,7 @@ import (
"path"
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@ -46,7 +46,7 @@ func main() {
}
datFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".dat"), os.O_RDWR, 0644)
if err != nil {
glog.Fatalf("Open Volume Data File [ERROR]: %v", err)
log.Fatalf("Open Volume Data File [ERROR]: %v", err)
}
datBackend := backend.NewDiskFile(datFile)
defer datBackend.Close()
@ -54,7 +54,7 @@ func main() {
superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
glog.Fatalf("cannot parse existing super block: %v", err)
log.Fatalf("cannot parse existing super block: %v", err)
}
fmt.Printf("Current Volume Replication: %s\n", superBlock.ReplicaPlacement)
@ -66,7 +66,7 @@ func main() {
replica, err := super_block.NewReplicaPlacementFromString(*targetReplica)
if err != nil {
glog.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
log.Fatalf("cannot parse target replica %s: %v", *targetReplica, err)
}
fmt.Printf("Changing replication to: %s\n", replica)
@ -79,7 +79,7 @@ func main() {
ttl, err := needle.ReadTTL(*targetTTL)
if err != nil {
glog.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
log.Fatalf("cannot parse target ttl %s: %v", *targetTTL, err)
}
fmt.Printf("Changing ttl to: %s\n", ttl)
@ -93,7 +93,7 @@ func main() {
header := superBlock.Bytes()
if n, e := datFile.WriteAt(header, 0); n == 0 || e != nil {
glog.Fatalf("cannot write super block: %v", e)
log.Fatalf("cannot write super block: %v", e)
}
fmt.Println("Change Applied.")

(next changed file)

@ -11,7 +11,7 @@ import (
"os"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@ -47,7 +47,7 @@ func main() {
vid := uint32(*volumeId)
servers := strings.Split(*serversStr, ",")
if len(servers) < 2 {
glog.Fatalf("You must specify more than 1 server\n")
log.Fatalf("You must specify more than 1 server\n")
}
var referenceServer string
var maxOffset int64
@ -55,7 +55,7 @@ func main() {
for _, addr := range servers {
files, offset, err := getVolumeFiles(vid, addr)
if err != nil {
glog.Fatalf("Failed to copy idx from volume server %s\n", err)
log.Fatalf("Failed to copy idx from volume server %s\n", err)
}
allFiles[addr] = files
if offset > maxOffset {
@ -101,7 +101,7 @@ func main() {
id, err = getNeedleFileId(vid, nid, addr)
}
if err != nil {
glog.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
log.Fatalf("Failed to get needle info %d from volume server %s\n", nid, err)
}
fmt.Println(id, addr, diffMsg)
}

(next changed file)

@ -8,7 +8,7 @@ import (
"path"
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@ -42,26 +42,26 @@ func main() {
}
indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
if err != nil {
glog.Fatalf("Read Volume Index %v", err)
log.Fatalf("Read Volume Index %v", err)
}
defer indexFile.Close()
datFileName := path.Join(*fixVolumePath, fileName+".dat")
datFile, err := os.OpenFile(datFileName, os.O_RDONLY, 0644)
if err != nil {
glog.Fatalf("Read Volume Data %v", err)
log.Fatalf("Read Volume Data %v", err)
}
datBackend := backend.NewDiskFile(datFile)
defer datBackend.Close()
newDatFile, err := os.Create(path.Join(*fixVolumePath, fileName+".dat_fixed"))
if err != nil {
glog.Fatalf("Write New Volume Data %v", err)
log.Fatalf("Write New Volume Data %v", err)
}
defer newDatFile.Close()
superBlock, err := super_block.ReadSuperBlock(datBackend)
if err != nil {
glog.Fatalf("Read Volume Data superblock %v", err)
log.Fatalf("Read Volume Data superblock %v", err)
}
newDatFile.Write(superBlock.Bytes())

(next changed file)

@ -6,7 +6,7 @@ import (
"os"
"path/filepath"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/backend"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
@ -49,7 +49,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
newFileName := filepath.Join(*volumePath, "dat_fixed")
newDatFile, err := os.Create(newFileName)
if err != nil {
glog.Fatalf("Write New Volume Data %v", err)
log.Fatalf("Write New Volume Data %v", err)
}
scanner.datBackend = backend.NewDiskFile(newDatFile)
scanner.datBackend.WriteAt(scanner.block.Bytes(), 0)
@ -58,7 +58,7 @@ func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset in
checksum := Checksum(n)
if scanner.hashes[checksum] {
glog.V(0).Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
log.Infof("duplicate checksum:%s fid:%d,%s%x @ offset:%d", checksum, *volumeId, n.Id, n.Cookie, offset)
return nil
}
scanner.hashes[checksum] = true
@ -83,13 +83,13 @@ func main() {
if _, err := os.Stat(scanner.dir); err != nil {
if err := os.MkdirAll(scanner.dir, os.ModePerm); err != nil {
glog.Fatalf("could not create output dir : %s", err)
log.Fatalf("could not create output dir : %s", err)
}
}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
if err != nil {
glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
log.Fatalf("Reading Volume File [ERROR] %s\n", err)
}
}

(next changed file)

@ -5,7 +5,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/super_block"
@ -32,7 +32,7 @@ func (scanner *VolumeFileScanner4SeeDat) ReadNeedleBody() bool {
func (scanner *VolumeFileScanner4SeeDat) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
t := time.Unix(int64(n.AppendAtNs)/int64(time.Second), int64(n.AppendAtNs)%int64(time.Second))
glog.V(0).Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
log.Infof("%d,%s%x offset %d size %d(%s) cookie %x appendedAt %v",
*volumeId, n.Id, n.Cookie, offset, n.Size, util.BytesToHumanReadable(uint64(n.Size)), n.Cookie, t)
return nil
}
@ -45,6 +45,6 @@ func main() {
scanner := &VolumeFileScanner4SeeDat{}
err := storage.ScanVolumeFile(*volumePath, *volumeCollection, vid, storage.NeedleMapInMemory, scanner)
if err != nil {
glog.Fatalf("Reading Volume File [ERROR] %s\n", err)
log.Fatalf("Reading Volume File [ERROR] %s\n", err)
}
}

(next changed file)

@ -8,7 +8,7 @@ import (
"path"
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/idx"
"github.com/chrislusf/seaweedfs/weed/storage/types"
)
@ -32,7 +32,7 @@ func main() {
}
indexFile, err := os.OpenFile(path.Join(*fixVolumePath, fileName+".idx"), os.O_RDONLY, 0644)
if err != nil {
glog.Fatalf("Create Volume Index [ERROR] %s\n", err)
log.Fatalf("Create Volume Index [ERROR] %s\n", err)
}
defer indexFile.Close()

(next changed file)

@ -16,7 +16,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
@ -119,7 +119,7 @@ func runBenchmark(cmd *Command, args []string) bool {
if *b.cpuprofile != "" {
f, err := os.Create(*b.cpuprofile)
if err != nil {
glog.Fatal(err)
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
@ -310,7 +310,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan bool) {
file, err := os.OpenFile(fileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
glog.Fatalf("File to create file %s: %s\n", fileName, err)
log.Fatalf("File to create file %s: %s\n", fileName, err)
}
defer file.Close()
@ -329,7 +329,7 @@ func writeFileIds(fileName string, fileIdLineChan chan string, finishChan chan b
func readFileIds(fileName string, fileIdLineChan chan string) {
file, err := os.Open(fileName) // For read access.
if err != nil {
glog.Fatalf("File to read file %s: %s\n", fileName, err)
log.Fatalf("File to read file %s: %s\n", fileName, err)
}
defer file.Close()

(next changed file)

@ -1,7 +1,7 @@
package command
import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/util"
@ -44,15 +44,15 @@ func runCompact(cmd *Command, args []string) bool {
v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid,
storage.NeedleMapInMemory, nil, nil, preallocate, 0)
if err != nil {
glog.Fatalf("Load Volume [ERROR] %s\n", err)
log.Fatalf("Load Volume [ERROR] %s\n", err)
}
if *compactMethod == 0 {
if err = v.Compact(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
log.Fatalf("Compact Volume [ERROR] %s\n", err)
}
} else {
if err = v.Compact2(preallocate, 0); err != nil {
glog.Fatalf("Compact Volume [ERROR] %s\n", err)
log.Fatalf("Compact Volume [ERROR] %s\n", err)
}
}

(next changed file)

@ -13,7 +13,7 @@ import (
"text/template"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
@ -111,11 +111,11 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
vid := scanner.vid
nv, ok := needleMap.Get(n.Id)
glog.V(3).Infof("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
log.Tracef("key %d offset %d size %d disk_size %d compressed %v ok %v nv %+v",
n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed(), ok, nv)
if *showDeleted && n.Size > 0 || ok && nv.Size.IsValid() && nv.Offset.ToAcutalOffset() == offset {
if newerThanUnix >= 0 && n.HasLastModifiedDate() && n.LastModified < uint64(newerThanUnix) {
glog.V(3).Infof("Skipping this file, as it's old enough: LastModified %d vs %d",
log.Tracef("Skipping this file, as it's old enough: LastModified %d vs %d",
n.LastModified, newerThanUnix)
return nil
}
@ -139,9 +139,9 @@ func (scanner *VolumeFileScanner4Export) VisitNeedle(n *needle.Needle, offset in
printNeedle(vid, n, scanner.version, true, offset, n.DiskSize(scanner.version))
}
}
glog.V(2).Infof("This seems deleted %d size %d", n.Id, n.Size)
log.Debugf("This seems deleted %d size %d", n.Id, n.Size)
} else {
glog.V(2).Infof("Skipping later-updated Id %d size %d", n.Id, n.Size)
log.Debugf("Skipping later-updated Id %d size %d", n.Id, n.Size)
}
return nil
}
@ -178,7 +178,7 @@ func runExport(cmd *Command, args []string) bool {
outputFile = os.Stdout
} else {
if outputFile, err = os.Create(*output); err != nil {
glog.Fatalf("cannot open output tar %s: %s", *output, err)
log.Fatalf("cannot open output tar %s: %s", *output, err)
}
}
defer outputFile.Close()
@ -201,7 +201,7 @@ func runExport(cmd *Command, args []string) bool {
defer needleMap.Close()
if err := needleMap.LoadFromIdx(path.Join(util.ResolvePath(*export.dir), fileName+".idx")); err != nil {
glog.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
log.Fatalf("cannot load needle map from %s.idx: %s", fileName, err)
}
volumeFileScanner := &VolumeFileScanner4Export{
@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool {
err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
if err != nil && err != io.EOF {
glog.Fatalf("Export Volume File [ERROR] %s\n", err)
log.Fatalf("Export Volume File [ERROR] %s\n", err)
}
return true
}
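
Reading across the rewritten call sites, the glog verbosity levels map onto named logrus levels consistently. The mapping below is inferred from the diff itself rather than stated anywhere in it:

glog.V(0).Infof / Infoln  ->  log.Infof / log.Infoln   (always-on informational)
glog.V(1).Infof           ->  log.Debugf
glog.V(2).Infof           ->  log.Debugf
glog.V(3).Infof           ->  log.Tracef               (finest-grained tracing)
glog.Warningf             ->  log.Warnf                (note the spelling change)
glog.Errorf               ->  log.Errorf
glog.Fatalf / glog.Fatal  ->  log.Fatalf / log.Fatal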

(next changed file)

@ -9,7 +9,7 @@ import (
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@ -152,37 +152,37 @@ func (fo *FilerOptions) startFiler() {
Filers: peers,
})
if nfs_err != nil {
glog.Fatalf("Filer startup error: %v", nfs_err)
log.Fatalf("Filer startup error: %v", nfs_err)
}
if *fo.publicPort != 0 {
publicListeningAddress := *fo.bindIp + ":" + strconv.Itoa(*fo.publicPort)
glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
log.Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, 0)
if e != nil {
glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
log.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
}
go func() {
if e := http.Serve(publicListener, publicVolumeMux); e != nil {
glog.Fatalf("Volume server fail to serve public: %v", e)
log.Fatalf("Volume server fail to serve public: %v", e)
}
}()
}
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
log.Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
filerListener, e := util.NewListener(
*fo.bindIp+":"+strconv.Itoa(*fo.port),
time.Duration(10)*time.Second,
)
if e != nil {
glog.Fatalf("Filer listener error: %v", e)
log.Fatalf("Filer listener error: %v", e)
}
// starting grpc server
grpcPort := *fo.port + 10000
grpcL, err := util.NewListener(*fo.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.filer"))
filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
@ -191,7 +191,7 @@ func (fo *FilerOptions) startFiler() {
httpS := &http.Server{Handler: defaultMux}
if err := httpS.Serve(filerListener); err != nil {
glog.Fatalf("Filer Fail to serve: %v", e)
log.Fatalf("Filer Fail to serve: %v", e)
}
}

(next changed file)

@ -4,7 +4,7 @@ import (
"context"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/replication"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
_ "github.com/chrislusf/seaweedfs/weed/replication/sink/azuresink"
@ -48,10 +48,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, input := range sub.NotificationInputs {
if config.GetBool("notification." + input.GetName() + ".enabled") {
if err := input.Initialize(config, "notification."+input.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification input for %s: %+v",
log.Fatalf("Failed to initialize notification input for %s: %+v",
input.GetName(), err)
}
glog.V(0).Infof("Configure notification input to %s", input.GetName())
log.Infof("Configure notification input to %s", input.GetName())
notificationInput = input
break
}
@ -69,7 +69,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
fromDir := config.GetString("source.filer.directory")
toDir := config.GetString("sink.filer.directory")
if strings.HasPrefix(toDir, fromDir) {
glog.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
log.Fatalf("recursive replication! source directory %s includes the sink directory %s", fromDir, toDir)
}
}
}
@ -78,10 +78,10 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for _, sk := range sink.Sinks {
if config.GetBool("sink." + sk.GetName() + ".enabled") {
if err := sk.Initialize(config, "sink."+sk.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize sink for %s: %+v",
log.Fatalf("Failed to initialize sink for %s: %+v",
sk.GetName(), err)
}
glog.V(0).Infof("Configure sink to %s", sk.GetName())
log.Infof("Configure sink to %s", sk.GetName())
dataSink = sk
break
}
@ -100,7 +100,7 @@ func runFilerReplicate(cmd *Command, args []string) bool {
for {
key, m, err := notificationInput.ReceiveMessage()
if err != nil {
glog.Errorf("receive %s: %+v", key, err)
log.Errorf("receive %s: %+v", key, err)
continue
}
if key == "" {
@ -108,16 +108,16 @@ func runFilerReplicate(cmd *Command, args []string) bool {
continue
}
if m.OldEntry != nil && m.NewEntry == nil {
glog.V(1).Infof("delete: %s", key)
log.Debugf("delete: %s", key)
} else if m.OldEntry == nil && m.NewEntry != nil {
glog.V(1).Infof(" add: %s", key)
log.Debugf(" add: %s", key)
} else {
glog.V(1).Infof("modify: %s", key)
log.Debugf("modify: %s", key)
}
if err = replicator.Replicate(context.Background(), key, m); err != nil {
glog.Errorf("replicate %s: %+v", key, err)
log.Errorf("replicate %s: %+v", key, err)
} else {
glog.V(1).Infof("replicated %s", key)
log.Debugf("replicated %s", key)
}
}
@ -130,7 +130,7 @@ func validateOneEnabledInput(config *viper.Viper) {
if enabledInput == "" {
enabledInput = input.GetName()
} else {
glog.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
log.Fatalf("Notification input is enabled for both %s and %s", enabledInput, input.GetName())
}
}
}

(next changed file)

@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication"
@ -89,7 +89,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerA, *syncOptions.aPath, *syncOptions.filerB,
*syncOptions.bPath, *syncOptions.bReplication, *syncOptions.bCollection, *syncOptions.bTtlSec, *syncOptions.bDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
log.Errorf("sync from %s to %s: %v", *syncOptions.filerA, *syncOptions.filerB, err)
time.Sleep(1747 * time.Millisecond)
}
}
@ -101,7 +101,7 @@ func runFilerSynchronize(cmd *Command, args []string) bool {
err := doSubscribeFilerMetaChanges(grpcDialOption, *syncOptions.filerB, *syncOptions.bPath, *syncOptions.filerA,
*syncOptions.aPath, *syncOptions.aReplication, *syncOptions.aCollection, *syncOptions.aTtlSec, *syncOptions.aDebug)
if err != nil {
glog.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
log.Errorf("sync from %s to %s: %v", *syncOptions.filerB, *syncOptions.filerA, err)
time.Sleep(2147 * time.Millisecond)
}
}
@ -134,7 +134,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
return err
}
glog.V(0).Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
log.Infof("start sync %s(%d) => %s(%d) from %v(%d)", sourceFiler, sourceFilerSignature, targetFiler, targetFilerSignature, time.Unix(0, sourceFilerOffsetTsNs), sourceFilerOffsetTsNs)
// create filer sink
filerSource := &source.FilerSource{}
@ -264,7 +264,7 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
counter++
if lastWriteTime.Add(3 * time.Second).Before(time.Now()) {
glog.V(0).Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
log.Infof("sync %s => %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, resp.TsNs), float64(counter)/float64(3))
counter = 0
lastWriteTime = time.Now()
if err := writeSyncOffset(grpcDialOption, targetFiler, sourceFilerSignature, resp.TsNs); err != nil {

(next changed file)

@ -5,7 +5,7 @@ import (
"path"
"strconv"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/chrislusf/seaweedfs/weed/storage/needle_map"
@ -47,12 +47,12 @@ func (scanner *VolumeFileScanner4Fix) ReadNeedleBody() bool {
}
func (scanner *VolumeFileScanner4Fix) VisitNeedle(n *needle.Needle, offset int64, needleHeader, needleBody []byte) error {
glog.V(2).Infof("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
log.Debugf("key %d offset %d size %d disk_size %d compressed %v", n.Id, offset, n.Size, n.DiskSize(scanner.version), n.IsCompressed())
if n.Size.IsValid() {
pe := scanner.nm.Set(n.Id, types.ToOffset(offset), n.Size)
glog.V(2).Infof("saved %d with error %v", n.Size, pe)
log.Debugf("saved %d with error %v", n.Size, pe)
} else {
glog.V(2).Infof("skipping deleted file ...")
log.Debugf("skipping deleted file ...")
return scanner.nm.Delete(n.Id)
}
return nil
@ -79,12 +79,12 @@ func runFix(cmd *Command, args []string) bool {
}
if err := storage.ScanVolumeFile(util.ResolvePath(*fixVolumePath), *fixVolumeCollection, vid, storage.NeedleMapInMemory, scanner); err != nil {
glog.Fatalf("scan .dat File: %v", err)
log.Fatalf("scan .dat File: %v", err)
os.Remove(indexFileName)
}
if err := nm.SaveToIdx(indexFileName); err != nil {
glog.Fatalf("save to .idx File: %v", err)
log.Fatalf("save to .idx File: %v", err)
os.Remove(indexFileName)
}

(next changed file)

@ -14,7 +14,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@ -94,7 +94,7 @@ func runMaster(cmd *Command, args []string) bool {
os.MkdirAll(*m.metaFolder, 0755)
}
if err := util.TestFolderWritable(util.ResolvePath(*m.metaFolder)); err != nil {
glog.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
log.Fatalf("Check Meta Folder (-mdir) Writable %s : %s", *m.metaFolder, err)
}
var masterWhiteList []string
@ -102,7 +102,7 @@ func runMaster(cmd *Command, args []string) bool {
masterWhiteList = strings.Split(*m.whiteList, ",")
}
if *m.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("volumeSizeLimitMB should be smaller than 30000")
log.Fatalf("volumeSizeLimitMB should be smaller than 30000")
}
startMaster(m, masterWhiteList)
@ -119,16 +119,16 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), peers)
listeningAddress := *masterOption.ipBind + ":" + strconv.Itoa(*masterOption.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
log.Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
masterListener, e := util.NewListener(listeningAddress, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
log.Fatalf("Master startup error: %v", e)
}
// start raftServer
raftServer, err := weed_server.NewRaftServer(security.LoadClientTLS(util.GetViper(), "grpc.master"),
peers, myMasterAddress, util.ResolvePath(*masterOption.metaFolder), ms.Topo, *masterOption.raftResumeState)
if raftServer == nil {
glog.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
log.Fatalf("please verify %s is writable, see https://github.com/chrislusf/seaweedfs/issues/717: %s", *masterOption.metaFolder, err)
}
ms.SetRaftServer(raftServer)
r.HandleFunc("/cluster/status", raftServer.StatusHandler).Methods("GET")
@ -136,14 +136,14 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
grpcPort := *masterOption.port + 10000
grpcL, err := util.NewListener(*masterOption.ipBind+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
}
// Create your protocol servers.
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
protobuf.RegisterRaftServer(grpcS, raftServer)
reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
log.Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
go grpcS.Serve(grpcL)
go func() {
@ -165,7 +165,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
}
func checkPeers(masterIp string, masterPort int, peers string) (masterAddress string, cleanedPeers []string) {
glog.V(0).Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
log.Infof("current: %s:%d peers:%s", masterIp, masterPort, peers)
masterAddress = masterIp + ":" + strconv.Itoa(masterPort)
if peers != "" {
cleanedPeers = strings.Split(peers, ",")
@ -183,7 +183,7 @@ func checkPeers(masterIp string, masterPort int, peers string) (masterAddress st
cleanedPeers = append(cleanedPeers, masterAddress)
}
if len(cleanedPeers)%2 == 0 {
glog.Fatalf("Only odd number of masters are supported!")
log.Fatalf("Only odd number of masters are supported!")
}
return
}
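
A behavioral nuance behind the many Fatalf rewrites above, which is a property of the two libraries rather than anything this diff configures: glog.Fatalf prints stack traces of all running goroutines before exiting, whereas logrus logs the entry, runs any registered exit handlers, and then calls os.Exit(1). Code that must intercept that exit, typically in tests, can override the logger's exit function; a sketch, assuming a dedicated logrus.Logger:

logger := logrus.New()
logger.ExitFunc = func(code int) {
	// Invoked instead of os.Exit(1); panicking lets a test recover().
	panic(code)
}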

(next changed file)

@ -19,7 +19,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@ -54,7 +54,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// parse filer grpc address
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(filer)
if err != nil {
glog.V(0).Infof("ParseFilerGrpcAddress: %v", err)
log.Infof("ParseFilerGrpcAddress: %v", err)
return true
}
@ -70,7 +70,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
return nil
})
if err != nil {
glog.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
log.Infof("failed to talk to filer %s: %v", filerGrpcAddress, err)
return true
}
@ -130,7 +130,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// Ensure target mount point availability
if isValid := checkMountPointAvailable(dir); !isValid {
glog.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
log.Fatalf("Expected mount to still be active, target mount point: %s, please check!", dir)
return true
}
@ -194,7 +194,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
// mount
c, err := fuse.Mount(dir, options...)
if err != nil {
glog.V(0).Infof("mount: %v", err)
log.Infof("mount: %v", err)
return true
}
defer fuse.Unmount(dir)
@ -204,13 +204,13 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
c.Close()
})
glog.V(0).Infof("mounted %s%s to %s", filer, mountRoot, dir)
log.Infof("mounted %s%s to %s", filer, mountRoot, dir)
err = fs.Serve(c, seaweedFileSystem)
// check if the mount process has an error to report
<-c.Ready
if err := c.MountError; err != nil {
glog.V(0).Infof("mount process: %v", err)
log.Infof("mount process: %v", err)
return true
}

(next changed file)

@ -10,7 +10,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/util/grace"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/messaging/broker"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@ -65,7 +65,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*msgBrokerOpt.filer)
if err != nil {
glog.Fatal(err)
log.Fatal(err)
return false
}
@ -82,10 +82,10 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
log.Infof("wait to connect to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
log.Infof("connected to filer %s grpc address %s", *msgBrokerOpt.filer, filerGrpcAddress)
break
}
}
@ -102,7 +102,7 @@ func (msgBrokerOpt *MessageBrokerOptions) startQueueServer() bool {
// start grpc listener
grpcL, err := util.NewListener(":"+strconv.Itoa(*msgBrokerOpt.port), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
log.Fatalf("failed to listen on grpc port %d: %v", *msgBrokerOpt.port, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.msg_broker"))
messaging_pb.RegisterSeaweedMessagingServer(grpcS, qs)

(next changed file)

@ -12,7 +12,7 @@ import (
"github.com/gorilla/mux"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/s3api"
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/util"
@ -137,7 +137,7 @@ func (s3opt *S3Options) startS3Server() bool {
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*s3opt.filer)
if err != nil {
glog.Fatal(err)
log.Fatal(err)
return false
}
@ -157,14 +157,14 @@ func (s3opt *S3Options) startS3Server() bool {
}
filerBucketsPath = resp.DirBuckets
metricsAddress, metricsIntervalSec = resp.MetricsAddress, int(resp.MetricsIntervalSec)
glog.V(0).Infof("S3 read filer buckets dir: %s", filerBucketsPath)
log.Infof("S3 read filer buckets dir: %s", filerBucketsPath)
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
log.Infof("wait to connect to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
log.Infof("connected to filer %s grpc address %s", *s3opt.filer, filerGrpcAddress)
break
}
}
@ -183,7 +183,7 @@ func (s3opt *S3Options) startS3Server() bool {
GrpcDialOption: grpcDialOption,
})
if s3ApiServer_err != nil {
glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
log.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)
}
httpS := &http.Server{Handler: router}
@ -191,18 +191,18 @@ func (s3opt *S3Options) startS3Server() bool {
listenAddress := fmt.Sprintf(":%d", *s3opt.port)
s3ApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
log.Fatalf("S3 API Server listener on %s error: %v", listenAddress, err)
}
if *s3opt.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
log.Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
if err = httpS.ServeTLS(s3ApiListener, *s3opt.tlsCertificate, *s3opt.tlsPrivateKey); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
log.Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
if err = httpS.Serve(s3ApiListener); err != nil {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
log.Fatalf("S3 API Server Fail to serve: %v", err)
}
}

(next changed file)

@ -10,7 +10,7 @@ import (
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -124,7 +124,7 @@ func runServer(cmd *Command, args []string) bool {
if *serverOptions.cpuprofile != "" {
f, err := os.Create(*serverOptions.cpuprofile)
if err != nil {
glog.Fatal(err)
log.Fatal(err)
}
pprof.StartCPUProfile(f)
defer pprof.StopCPUProfile()
@ -175,14 +175,14 @@ func runServer(cmd *Command, args []string) bool {
folders := strings.Split(*volumeDataFolders, ",")
if *masterOptions.volumeSizeLimitMB > util.VolumeSizeLimitGB*1000 {
glog.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
log.Fatalf("masterVolumeSizeLimitMB should be less than 30000")
}
if *masterOptions.metaFolder == "" {
*masterOptions.metaFolder = folders[0]
}
if err := util.TestFolderWritable(util.ResolvePath(*masterOptions.metaFolder)); err != nil {
glog.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
log.Fatalf("Check Meta Folder (-mdir=\"%s\") Writable: %s", *masterOptions.metaFolder, err)
}
filerOptions.defaultLevelDbDirectory = masterOptions.metaFolder

(next changed file)

@ -22,7 +22,7 @@ import (
"google.golang.org/grpc/reflection"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
"github.com/chrislusf/seaweedfs/weed/server"
stats_collect "github.com/chrislusf/seaweedfs/weed/stats"
@ -125,7 +125,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
v.folders = strings.Split(volumeFolders, ",")
for _, folder := range v.folders {
if err := util.TestFolderWritable(util.ResolvePath(folder)); err != nil {
glog.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
log.Fatalf("Check Data Folder(-dir) Writable %s : %s", folder, err)
}
}
@ -135,7 +135,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if max, e := strconv.Atoi(maxString); e == nil {
v.folderMaxLimits = append(v.folderMaxLimits, max)
} else {
glog.Fatalf("The max specified in -max not a valid number %s", maxString)
log.Fatalf("The max specified in -max not a valid number %s", maxString)
}
}
if len(v.folderMaxLimits) == 1 && len(v.folders) > 1 {
@ -144,7 +144,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
}
if len(v.folders) != len(v.folderMaxLimits) {
glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
log.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
}
// set minFreeSpacePercent
@ -153,7 +153,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if value, e := strconv.ParseFloat(freeString, 32); e == nil {
v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
} else {
glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
log.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
}
}
if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
@ -162,7 +162,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
}
}
if len(v.folders) != len(v.minFreeSpacePercents) {
glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
log.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
}
// security related white list configuration
@ -172,7 +172,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if *v.ip == "" {
*v.ip = util.DetectedHostAddress()
glog.V(0).Infof("detected volume server ip address: %v", *v.ip)
log.Infof("detected volume server ip address: %v", *v.ip)
}
if *v.publicPort == 0 {
@ -226,7 +226,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
if v.isSeparatedPublicPort() {
publicHttpDown = v.startPublicHttpService(publicVolumeMux)
if nil == publicHttpDown {
glog.Fatalf("start public http service failed")
log.Fatalf("start public http service failed")
}
}
@ -239,7 +239,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
// Stop heartbeats
if !volumeServer.StopHeartbeat() {
glog.V(0).Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
log.Infof("stop send heartbeat and wait %d seconds until shutdown ...", *v.preStopSeconds)
time.Sleep(time.Duration(*v.preStopSeconds) * time.Second)
}
@ -257,18 +257,18 @@ func shutdown(publicHttpDown httpdown.Server, clusterHttpServer httpdown.Server,
// firstly, stop the public http service to prevent from receiving new user request
if nil != publicHttpDown {
glog.V(0).Infof("stop public http server ... ")
log.Infof("stop public http server ... ")
if err := publicHttpDown.Stop(); err != nil {
glog.Warningf("stop the public http server failed, %v", err)
log.Warnf("stop the public http server failed, %v", err)
}
}
glog.V(0).Infof("graceful stop cluster http server ... ")
log.Infof("graceful stop cluster http server ... ")
if err := clusterHttpServer.Stop(); err != nil {
glog.Warningf("stop the cluster http server failed, %v", err)
log.Warnf("stop the cluster http server failed, %v", err)
}
glog.V(0).Infof("graceful stop gRPC ...")
log.Infof("graceful stop gRPC ...")
grpcS.GracefulStop()
volumeServer.Shutdown()
@ -286,14 +286,14 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
grpcPort := *v.port + 10000
grpcL, err := util.NewListener(*v.bindIp+":"+strconv.Itoa(grpcPort), 0)
if err != nil {
glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
log.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
}
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.volume"))
volume_server_pb.RegisterVolumeServerServer(grpcS, vs)
reflection.Register(grpcS)
go func() {
if err := grpcS.Serve(grpcL); err != nil {
glog.Fatalf("start gRPC service failed, %s", err)
log.Fatalf("start gRPC service failed, %s", err)
}
}()
return grpcS
@ -301,17 +301,17 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.publicPort)
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
log.Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
log.Fatalf("Volume server listener error:%v", e)
}
pubHttp := httpdown.HTTP{StopTimeout: 5 * time.Minute, KillTimeout: 5 * time.Minute}
publicHttpDown := pubHttp.Serve(&http.Server{Handler: handler}, publicListener)
go func() {
if err := publicHttpDown.Wait(); err != nil {
glog.Errorf("public http down wait failed, %v", err)
log.Errorf("public http down wait failed, %v", err)
}
}()
@ -328,10 +328,10 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
listeningAddress := *v.bindIp + ":" + strconv.Itoa(*v.port)
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
log.Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
log.Fatalf("Volume server listener error:%v", e)
}
httpDown := httpdown.HTTP{
@ -342,7 +342,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
clusterHttpServer := httpDown.Serve(&http.Server{Handler: handler}, listener)
go func() {
if e := clusterHttpServer.Wait(); e != nil {
glog.Fatalf("Volume server fail to serve: %v", e)
log.Fatalf("Volume server fail to serve: %v", e)
}
}()
return clusterHttpServer

(next changed file)

@ -5,9 +5,9 @@ import (
"testing"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
)
func TestXYZ(t *testing.T) {
glog.V(0).Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
log.Infoln("Last-Modified", time.Unix(int64(1373273596), 0).UTC().Format(http.TimeFormat))
}
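
This converted test only prints through the new logger. When a test needs to assert on what was logged, logrus ships an in-memory test hook; a sketch, not used anywhere in this commit:

package example

import (
	"testing"

	"github.com/sirupsen/logrus/hooks/test"
)

func TestCaptureLog(t *testing.T) {
	// NewNullLogger discards output entirely while the hook records
	// every entry, so assertions stay quiet.
	logger, hook := test.NewNullLogger()
	logger.Infoln("Last-Modified check")
	if len(hook.Entries) != 1 {
		t.Fatalf("expected 1 captured entry, got %d", len(hook.Entries))
	}
}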

(next changed file)

@ -9,7 +9,7 @@ import (
"strconv"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@ -54,7 +54,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadConfiguration("security", false)
glog.V(0).Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
log.Infof("Starting Seaweed WebDav Server %s at https port %d", util.Version(), *webDavStandaloneOptions.port)
return webDavStandaloneOptions.startWebDav()
@ -76,7 +76,7 @@ func (wo *WebDavOption) startWebDav() bool {
// parse filer grpc address
filerGrpcAddress, err := pb.ParseFilerGrpcAddress(*wo.filer)
if err != nil {
glog.Fatal(err)
log.Fatal(err)
return false
}
@ -94,10 +94,10 @@ func (wo *WebDavOption) startWebDav() bool {
return nil
})
if err != nil {
glog.V(0).Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
log.Infof("wait to connect to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
time.Sleep(time.Second)
} else {
glog.V(0).Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
log.Infof("connected to filer %s grpc address %s", *wo.filer, filerGrpcAddress)
break
}
}
@ -114,7 +114,7 @@ func (wo *WebDavOption) startWebDav() bool {
CacheSizeMB: *wo.cacheSizeMB,
})
if webdavServer_err != nil {
glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)
log.Fatalf("WebDav Server startup error: %v", webdavServer_err)
}
httpS := &http.Server{Handler: ws.Handler}
@ -122,18 +122,18 @@ func (wo *WebDavOption) startWebDav() bool {
listenAddress := fmt.Sprintf(":%d", *wo.port)
webDavListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
if err != nil {
glog.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
log.Fatalf("WebDav Server listener on %s error: %v", listenAddress, err)
}
if *wo.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
log.Infof("Start Seaweed WebDav Server %s at https port %d", util.Version(), *wo.port)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
log.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
log.Infof("Start Seaweed WebDav Server %s at http port %d", util.Version(), *wo.port)
if err = httpS.Serve(webDavListener); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
log.Fatalf("WebDav Server Fail to serve: %v", err)
}
}

(next changed file)

@ -5,7 +5,7 @@ import (
"database/sql"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"strings"
@ -81,7 +81,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
}
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("insert %s falls back to update: %v", entry.FullPath, err)
log.Debugf("insert %s falls back to update: %v", entry.FullPath, err)
res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, meta, util.HashStringToLong(dir), name, dir)
if err != nil {
@ -187,7 +187,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
glog.V(0).Infof("scan %s : %v", fullpath, err)
log.Infof("scan %s : %v", fullpath, err)
return nil, fmt.Errorf("scan %s: %v", fullpath, err)
}
@ -195,7 +195,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
FullPath: util.NewFullPath(string(fullpath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
log.Infof("scan decode %s : %v", entry.FullPath, err)
return nil, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}

(next changed file)

@ -8,7 +8,7 @@ import (
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -24,7 +24,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
}
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("kv insert falls back to update: %s", err)
log.Debugf("kv insert falls back to update: %s", err)
res, err = store.getTxOrDB(ctx).ExecContext(ctx, store.SqlUpdate, value, dirHash, name, dirStr)
if err != nil {

(next changed file)

@ -6,7 +6,7 @@ import (
"github.com/gocql/gocql"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -42,7 +42,7 @@ func (store *CassandraStore) initialize(keyspace string, hosts []string, usernam
store.cluster.Consistency = gocql.LocalQuorum
store.session, err = store.cluster.CreateSession()
if err != nil {
glog.V(0).Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
log.Infof("Failed to open cassandra store, hosts %v, keyspace %s", hosts, keyspace)
}
return
}
@ -155,13 +155,13 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, fullpath
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)
}
if err := iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
log.Infof("list iterator close: %v", err)
}
return entries, err

(next changed file)

@ -3,7 +3,7 @@ package filer
import (
"os"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/spf13/viper"
)
@ -18,11 +18,11 @@ func (f *Filer) LoadConfiguration(config *viper.Viper) {
for _, store := range Stores {
if config.GetBool(store.GetName() + ".enabled") {
if err := store.Initialize(config, store.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize store for %s: %+v",
log.Fatalf("Failed to initialize store for %s: %+v",
store.GetName(), err)
}
f.SetStore(store)
glog.V(0).Infof("Configure filer for %s", store.GetName())
log.Infof("Configure filer for %s", store.GetName())
return
}
}
@ -43,7 +43,7 @@ func validateOneEnabledStore(config *viper.Viper) {
if enabledStore == "" {
enabledStore = store.GetName()
} else {
glog.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
log.Fatalf("Filer store is enabled for both %s and %s", enabledStore, store.GetName())
}
}
}

(next changed file)

@ -7,7 +7,7 @@ import (
"strings"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
jsoniter "github.com/json-iterator/go"
@ -67,7 +67,7 @@ func (store *ElasticStore) Initialize(configuration weed_util.Configuration, pre
if store.maxPageSize <= 0 {
store.maxPageSize = 10000
}
glog.Infof("filer store elastic endpoints: %v.", servers)
log.Infof("filer store elastic endpoints: %v.", servers)
return store.initialize(options)
}
@ -110,7 +110,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
}
value, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v.", err)
}
_, err = store.client.Index().
@ -120,7 +120,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
BodyJson(string(value)).
Do(ctx)
if err != nil {
glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
log.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v.", err)
}
return nil
@ -149,7 +149,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
err := jsoniter.Unmarshal(searchResult.Source, esEntry)
return esEntry.Entry, err
}
glog.Errorf("find entry(%s),%v.", string(fullpath), err)
log.Errorf("find entry(%s),%v.", string(fullpath), err)
return nil, filer_pb.ErrNotFound
}
@ -167,7 +167,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
return nil
}
glog.Errorf("delete index(%s) %v.", index, err)
log.Errorf("delete index(%s) %v.", index, err)
return err
}
@ -182,7 +182,7 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
return nil
}
}
glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
log.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
return fmt.Errorf("delete entry %v.", err)
}
@ -207,7 +207,7 @@ func (store *ElasticStore) ListDirectoryEntries(
func (store *ElasticStore) listRootDirectoryEntries(ctx context.Context, startFileName string, inclusive bool, limit int) (entries []*filer.Entry, err error) {
indexResult, err := store.client.CatIndices().Do(ctx)
if err != nil {
glog.Errorf("list indices %v.", err)
log.Errorf("list indices %v.", err)
return entries, err
}
for _, index := range indexResult {
@ -249,7 +249,7 @@ func (store *ElasticStore) listDirectoryEntries(
result := &elastic.SearchResult{}
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
log.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return entries, err
}
} else {
@ -259,7 +259,7 @@ func (store *ElasticStore) listDirectoryEntries(
}
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
log.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return entries, err
}
}

(next changed file)

@ -6,7 +6,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
jsoniter "github.com/json-iterator/go"
elastic "github.com/olivere/elastic/v7"
)
@ -22,7 +22,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
return nil
}
}
glog.Errorf("delete key(id:%s) %v.", string(key), err)
log.Errorf("delete key(id:%s) %v.", string(key), err)
return fmt.Errorf("delete key %v.", err)
}
@ -41,7 +41,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
return esEntry.Value, nil
}
}
glog.Errorf("find key(%s),%v.", string(key), err)
log.Errorf("find key(%s),%v.", string(key), err)
return value, filer.ErrKvNotFound
}
@ -49,7 +49,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
esEntry := &ESKVEntry{value}
val, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.Errorf("insert key(%s) %v.", string(key), err)
log.Errorf("insert key(%s) %v.", string(key), err)
return fmt.Errorf("insert key %v.", err)
}
_, err = store.client.Index().

(next changed file)

@ -9,7 +9,7 @@ import (
"go.etcd.io/etcd/clientv3"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@ -45,7 +45,7 @@ func (store *EtcdStore) Initialize(configuration weed_util.Configuration, prefix
}
func (store *EtcdStore) initialize(servers string, timeout string) (err error) {
glog.Infof("filer store etcd: %s", servers)
log.Infof("filer store etcd: %s", servers)
to, err := time.ParseDuration(timeout)
if err != nil {
@ -169,7 +169,7 @@ func (store *EtcdStore) ListDirectoryEntries(ctx context.Context, fullpath weed_
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)

(next changed file)

@ -9,7 +9,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -87,7 +87,7 @@ func ResolveOneChunkManifest(lookupFileIdFn LookupFileIdFunctionType, chunk *fil
func fetchChunk(lookupFileIdFn LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) ([]byte, error) {
urlStrings, err := lookupFileIdFn(fileId)
if err != nil {
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
log.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return nil, err
}
return retriedFetchChunkData(urlStrings, cipherKey, isGzipped, true, 0, 0)
@ -108,14 +108,14 @@ func retriedFetchChunkData(urlStrings []string, cipherKey []byte, isGzipped bool
break
}
if err != nil {
glog.V(0).Infof("read %s failed, err: %v", urlString, err)
log.Infof("read %s failed, err: %v", urlString, err)
buffer.Reset()
} else {
break
}
}
if err != nil && shouldRetry {
glog.V(0).Infof("retry reading in %v", waitTime)
log.Infof("retry reading in %v", waitTime)
time.Sleep(waitTime)
} else {
break

(next changed file)

@ -158,9 +158,9 @@ func ViewFromVisibleIntervals(visibles []VisibleInterval, offset int64, size int
func logPrintf(name string, visibles []VisibleInterval) {
/*
glog.V(0).Infof("%s len %d", name, len(visibles))
log.Infof("%s len %d", name, len(visibles))
for _, v := range visibles {
glog.V(0).Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
log.Infof("%s: [%d,%d) %s %d", name, v.start, v.stop, v.fileId, v.chunkOffset)
}
*/
}
@ -185,22 +185,22 @@ func MergeIntoVisibles(visibles []VisibleInterval, chunk *filer_pb.FileChunk) (n
}
logPrintf(" before", visibles)
// glog.V(0).Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
// log.Infof("newVisibles %d adding chunk [%d,%d) %s size:%d", len(newVisibles), chunk.Offset, chunk.Offset+int64(chunk.Size), chunk.GetFileIdString(), chunk.Size)
chunkStop := chunk.Offset + int64(chunk.Size)
for _, v := range visibles {
if v.start < chunk.Offset && chunk.Offset < v.stop {
t := newVisibleInterval(v.start, chunk.Offset, v.fileId, v.modifiedTime, v.chunkOffset, v.chunkSize, v.cipherKey, v.isGzipped)
newVisibles = append(newVisibles, t)
// glog.V(0).Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
// log.Infof("visible %d [%d,%d) =1> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
}
if v.start < chunkStop && chunkStop < v.stop {
t := newVisibleInterval(chunkStop, v.stop, v.fileId, v.modifiedTime, v.chunkOffset+(chunkStop-v.start), v.chunkSize, v.cipherKey, v.isGzipped)
newVisibles = append(newVisibles, t)
// glog.V(0).Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
// log.Infof("visible %d [%d,%d) =2> [%d,%d)", i, v.start, v.stop, t.start, t.stop)
}
if chunkStop <= v.start || v.stop <= chunk.Offset {
newVisibles = append(newVisibles, v)
// glog.V(0).Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
// log.Infof("visible %d [%d,%d) =3> [%d,%d)", i, v.start, v.stop, v.start, v.stop)
}
}
newVisibles = append(newVisibles, newV)
@ -240,7 +240,7 @@ func NonOverlappingVisibleIntervals(lookupFileIdFn LookupFileIdFunctionType, chu
for _, chunk := range chunks {
// glog.V(0).Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
// log.Infof("merge [%d,%d)", chunk.Offset, chunk.Offset+int64(chunk.Size))
visibles = MergeIntoVisibles(visibles, chunk)
logPrintf("add", visibles)


@ -4,7 +4,7 @@ import (
"sort"
"testing"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@ -41,6 +41,6 @@ func printChunks(name string, chunks []*filer_pb.FileChunk) {
return chunks[i].Offset < chunks[j].Offset
})
for _, chunk := range chunks {
glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
log.Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
}


@ -9,7 +9,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@ -93,14 +93,14 @@ func (f *Filer) setOrLoadFilerStoreSignature(store FilerStore) {
storeIdBytes = make([]byte, 4)
util.Uint32toBytes(storeIdBytes, uint32(f.Signature))
if err = store.KvPut(context.Background(), []byte(FilerStoreId), storeIdBytes); err != nil {
glog.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
log.Fatalf("set %s=%d : %v", FilerStoreId, f.Signature, err)
}
glog.V(0).Infof("create %s to %d", FilerStoreId, f.Signature)
log.Infof("create %s to %d", FilerStoreId, f.Signature)
} else if err == nil && len(storeIdBytes) == 4 {
f.Signature = int32(util.BytesToUint32(storeIdBytes))
glog.V(0).Infof("existing %s = %d", FilerStoreId, f.Signature)
log.Infof("existing %s = %d", FilerStoreId, f.Signature)
} else {
glog.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
log.Fatalf("read %v=%v : %v", FilerStoreId, string(storeIdBytes), err)
}
}
@ -145,7 +145,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
// fmt.Printf("%d directory: %+v\n", i, dirPath)
// check the store directly
glog.V(4).Infof("find uncached directory: %s", dirPath)
log.Tracef("find uncached directory: %s", dirPath)
dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
// no such existing directory
@ -169,11 +169,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
log.Debugf("create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
if _, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound {
glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
log.Tracef("mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
@ -182,7 +182,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
} else if !dirEntry.IsDirectory() {
glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
log.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}
@ -194,13 +194,13 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
if lastDirectoryEntry == nil {
glog.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
log.Errorf("CreateEntry %s: lastDirectoryEntry is nil", entry.FullPath)
return fmt.Errorf("parent folder not found: %v", entry.FullPath)
}
/*
if !hasWritePermission(lastDirectoryEntry, entry) {
glog.V(0).Infof("directory %s: %v, entry: uid=%d gid=%d",
log.Infof("directory %s: %v, entry: uid=%d gid=%d",
lastDirectoryEntry.FullPath, lastDirectoryEntry.Attr, entry.Uid, entry.Gid)
return fmt.Errorf("no write permission in folder %v", lastDirectoryEntry.FullPath)
}
@ -209,19 +209,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
if oldEntry == nil {
glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
log.Tracef("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
log.Errorf("insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
log.Tracef("EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
log.Tracef("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
glog.Errorf("update entry %s: %v", entry.FullPath, err)
log.Errorf("update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
@ -231,7 +231,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
f.deleteChunksIfNotNew(oldEntry, entry)
glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
log.Tracef("CreateEntry %s: created", entry.FullPath)
return nil
}
@ -239,11 +239,11 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err error) {
if oldEntry != nil {
if oldEntry.IsDirectory() && !entry.IsDirectory() {
glog.Errorf("existing %s is a directory", entry.FullPath)
log.Errorf("existing %s is a directory", entry.FullPath)
return fmt.Errorf("existing %s is a directory", entry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
glog.Errorf("existing %s is a file", entry.FullPath)
log.Errorf("existing %s is a file", entry.FullPath)
return fmt.Errorf("existing %s is a file", entry.FullPath)
}
}
@ -321,7 +321,7 @@ func (f *Filer) Shutdown() {
func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
for _, hardLinkId := range hardLinkIds {
if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
log.Errorf("delete hard link id %d : %v", hardLinkId, err)
}
}
}

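Across these hunks the migration applies a consistent level mapping: glog.V(0) becomes log.Infof, glog.V(1) and V(2) become log.Debugf, glog.V(3) and V(4) become log.Tracef, glog.Warningf becomes log.Warnf, and plain Infof, Errorf, and Fatalf carry over unchanged. A minimal sketch of what such a util/log facade over logrus could look like — the setup choices below are assumptions for illustration, not the actual weed/util/log implementation:

package log

import "github.com/sirupsen/logrus"

var logger = logrus.New()

func init() {
	// Assumed setup: Trace level keeps the old V(3)/V(4) detail visible,
	// and report-caller restores the file:line info that glog printed.
	logger.SetLevel(logrus.TraceLevel)
	logger.SetReportCaller(true)
}

func Tracef(format string, args ...interface{}) { logger.Tracef(format, args...) }
func Debugf(format string, args ...interface{}) { logger.Debugf(format, args...) }
func Infof(format string, args ...interface{})  { logger.Infof(format, args...) }
func Warnf(format string, args ...interface{})  { logger.Warnf(format, args...) }
func Errorf(format string, args ...interface{}) { logger.Errorf(format, args...) }
func Fatalf(format string, args ...interface{}) { logger.Fatalf(format, args...) }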

@ -5,7 +5,7 @@ import (
"math"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -32,7 +32,7 @@ func (f *Filer) LoadBuckets() {
entries, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "")
if err != nil {
glog.V(1).Infof("no buckets found: %v", err)
log.Debugf("no buckets found: %v", err)
return
}
@ -41,7 +41,7 @@ func (f *Filer) LoadBuckets() {
shouldFsyncMap[bucket] = true
}
glog.V(1).Infof("buckets found: %d", len(entries))
log.Debugf("buckets found: %d", len(entries))
f.buckets.Lock()
for _, entry := range entries {


@ -5,7 +5,7 @@ import (
"context"
"io"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/jsonpb"
@ -36,7 +36,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
if err == filer_pb.ErrNotFound {
return nil
}
glog.Errorf("read filer conf entry %s: %v", filerConfPath, err)
log.Errorf("read filer conf entry %s: %v", filerConfPath, err)
return
}
@ -46,7 +46,7 @@ func (fc *FilerConf) loadFromFiler(filer *Filer) (err error) {
func (fc *FilerConf) loadFromChunks(filer *Filer, chunks []*filer_pb.FileChunk) (err error) {
data, err := filer.readEntry(chunks)
if err != nil {
glog.Errorf("read filer conf content: %v", err)
log.Errorf("read filer conf content: %v", err)
return
}
@ -60,7 +60,7 @@ func (fc *FilerConf) LoadFromBytes(data []byte) (err error) {
err = proto.UnmarshalText(string(data), conf)
if err != nil {
glog.Errorf("unable to parse filer conf: %v", err)
log.Errorf("unable to parse filer conf: %v", err)
// this is not recoverable
return nil
}
@ -85,7 +85,7 @@ func (fc *FilerConf) doLoadConf(conf *filer_pb.FilerConf) (err error) {
func (fc *FilerConf) AddLocationConf(locConf *filer_pb.FilerConf_PathConf) (err error) {
err = fc.rules.Put([]byte(locConf.LocationPrefix), locConf)
if err != nil {
glog.Errorf("put location prefix: %v", err)
log.Errorf("put location prefix: %v", err)
}
return
}


@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@ -33,7 +33,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
var dirHardLinkIds []HardLinkId
dirChunks, dirHardLinkIds, err = f.doBatchDeleteFolderMetaAndData(ctx, entry, isRecursive, ignoreRecursiveError, shouldDeleteChunks && !isCollection, isFromOtherCluster, signatures)
if err != nil {
glog.V(0).Infof("delete directory %s: %v", p, err)
log.Infof("delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
chunks = append(chunks, dirChunks...)
@ -71,12 +71,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
for {
entries, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "")
if err != nil {
glog.Errorf("list folder %s: %v", entry.FullPath, err)
log.Errorf("list folder %s: %v", entry.FullPath, err)
return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
glog.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
log.Errorf("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
return nil, nil, fmt.Errorf("fail to delete non-empty folder: %s", entry.FullPath)
}
@ -107,7 +107,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
}
glog.V(3).Infof("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
log.Tracef("deleting directory %v delete %d chunks: %v", entry.FullPath, len(chunks), shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return nil, nil, fmt.Errorf("filer store delete: %v", storeDeletionErr)
@ -120,7 +120,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
log.Tracef("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteEntry(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@ -139,7 +139,7 @@ func (f *Filer) doDeleteCollection(collectionName string) (err error) {
Name: collectionName,
})
if err != nil {
glog.Infof("delete collection %s: %v", collectionName, err)
log.Infof("delete collection %s: %v", collectionName, err)
}
return err
})


@ -4,7 +4,7 @@ import (
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/wdclient"
@ -54,10 +54,10 @@ func (f *Filer) loopProcessingDeletion() {
_, err := operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, toDeleteFileIds, lookupFunc)
if err != nil {
if !strings.Contains(err.Error(), "already deleted") {
glog.V(0).Infof("deleting fileIds len=%d error: %v", deletionCount, err)
log.Infof("deleting fileIds len=%d error: %v", deletionCount, err)
}
} else {
glog.V(1).Infof("deleting fileIds len=%d", deletionCount)
log.Debugf("deleting fileIds len=%d", deletionCount)
}
}
})
@ -76,7 +76,7 @@ func (f *Filer) DeleteChunks(chunks []*filer_pb.FileChunk) {
}
dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
if manifestResolveErr != nil {
glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())


@ -9,7 +9,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
@ -54,7 +54,7 @@ func (f *Filer) NotifyUpdateEvent(ctx context.Context, oldEntry, newEntry *Entry
}
if notification.Queue != nil {
glog.V(3).Infof("notifying entry update %v", fullpath)
log.Tracef("notifying entry update %v", fullpath)
notification.Queue.SendMessage(fullpath, eventNotification)
}
@ -73,7 +73,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
}
data, err := proto.Marshal(event)
if err != nil {
glog.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
log.Errorf("failed to marshal filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return
}
@ -96,7 +96,7 @@ func (f *Filer) logFlushFunc(startTime, stopTime time.Time, buf []byte) {
for {
if err := f.appendToFile(targetFile, buf); err != nil {
glog.V(1).Infof("log write failed %s: %v", targetFile, err)
log.Debugf("log write failed %s: %v", targetFile, err)
time.Sleep(737 * time.Millisecond)
} else {
break


@ -4,7 +4,7 @@ import (
"bytes"
"math"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -22,7 +22,7 @@ func (f *Filer) onMetadataChangeEvent(event *filer_pb.SubscribeMetadataResponse)
return
}
glog.V(0).Infof("procesing %v", event)
log.Infof("procesing %v", event)
if entry.Name == FilerConfName {
f.reloadFilerConfiguration(entry)
}
@ -42,7 +42,7 @@ func (f *Filer) reloadFilerConfiguration(entry *filer_pb.Entry) {
fc := NewFilerConf()
err := fc.loadFromChunks(f, entry.Chunks)
if err != nil {
glog.Errorf("read filer conf chunks: %v", err)
log.Errorf("read filer conf chunks: %v", err)
return
}
f.FilerConf = fc
@ -54,7 +54,7 @@ func (f *Filer) LoadFilerConf() {
return fc.loadFromFiler(f)
})
if err != nil {
glog.Errorf("read filer conf: %v", err)
log.Errorf("read filer conf: %v", err)
return
}
f.FilerConf = fc


@ -4,7 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@ -54,12 +54,12 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
value, err := fsw.KvGet(ctx, key)
if err != nil {
glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
log.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
if err = entry.DecodeAttributesAndChunks(value); err != nil {
glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
log.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}


@ -10,7 +10,7 @@ import (
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@ -37,7 +37,7 @@ func (store *LevelDBStore) Initialize(configuration weed_util.Configuration, pre
}
func (store *LevelDBStore) initialize(dir string) (err error) {
glog.Infof("filer store dir: %s", dir)
log.Infof("filer store dir: %s", dir)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
@ -53,7 +53,7 @@ func (store *LevelDBStore) initialize(dir string) (err error) {
store.db, err = leveldb.RecoverFile(dir, opts)
}
if err != nil {
glog.Infof("filer store open dir %s: %v", dir, err)
log.Infof("filer store open dir %s: %v", dir, err)
return
}
}
@ -193,7 +193,7 @@ func (store *LevelDBStore) ListDirectoryEntries(ctx context.Context, fullpath we
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)


@ -13,7 +13,7 @@ import (
"os"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
weed_util "github.com/chrislusf/seaweedfs/weed/util"
)
@ -37,7 +37,7 @@ func (store *LevelDB2Store) Initialize(configuration weed_util.Configuration, pr
}
func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
glog.Infof("filer store leveldb2 dir: %s", dir)
log.Infof("filer store leveldb2 dir: %s", dir)
if err := weed_util.TestFolderWritable(dir); err != nil {
return fmt.Errorf("Check Level Folder %s Writable: %s", dir, err)
}
@ -56,7 +56,7 @@ func (store *LevelDB2Store) initialize(dir string, dbCount int) (err error) {
db, dbErr = leveldb.RecoverFile(dbFolder, opts)
}
if dbErr != nil {
glog.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
log.Errorf("filer store open dir %s: %v", dbFolder, dbErr)
return dbErr
}
store.dbs = append(store.dbs, db)
@ -205,7 +205,7 @@ func (store *LevelDB2Store) ListDirectoryEntries(ctx context.Context, fullpath w
// println("list", entry.FullPath, "chunks", len(entry.Chunks))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
log.Infof("list %s : %v", entry.FullPath, err)
break
}
entries = append(entries, entry)


@ -11,7 +11,7 @@ import (
"github.com/golang/protobuf/proto"
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
@ -64,7 +64,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
peerSignature, err := ma.readFilerStoreSignature(peer)
for err != nil {
glog.V(0).Infof("connecting to peer filer %s: %v", peer, err)
log.Infof("connecting to peer filer %s: %v", peer, err)
time.Sleep(1357 * time.Millisecond)
peerSignature, err = ma.readFilerStoreSignature(peer)
}
@ -74,27 +74,27 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
lastTsNs = prevTsNs
}
glog.V(0).Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
log.Infof("follow peer: %v, last %v (%d)", peer, time.Unix(0, lastTsNs), lastTsNs)
var counter int64
var synced bool
maybeReplicateMetadataChange = func(event *filer_pb.SubscribeMetadataResponse) {
if err := Replay(f.Store, event); err != nil {
glog.Errorf("failed to reply metadata change from %v: %v", peer, err)
log.Errorf("failed to reply metadata change from %v: %v", peer, err)
return
}
counter++
if lastPersistTime.Add(time.Minute).Before(time.Now()) {
if err := ma.updateOffset(f, peer, peerSignature, event.TsNs); err == nil {
if event.TsNs < time.Now().Add(-2*time.Minute).UnixNano() {
glog.V(0).Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
log.Infof("sync with %s progressed to: %v %0.2f/sec", peer, time.Unix(0, event.TsNs), float64(counter)/60.0)
} else if !synced {
synced = true
glog.V(0).Infof("synced with %s", peer)
log.Infof("synced with %s", peer)
}
lastPersistTime = time.Now()
counter = 0
} else {
glog.V(0).Infof("failed to update offset for %v: %v", peer, err)
log.Infof("failed to update offset for %v: %v", peer, err)
}
}
}
@ -103,7 +103,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
processEventFn := func(event *filer_pb.SubscribeMetadataResponse) error {
data, err := proto.Marshal(event)
if err != nil {
glog.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
log.Errorf("failed to marshal subscribed filer_pb.SubscribeMetadataResponse %+v: %v", event, err)
return err
}
dir := event.Directory
@ -147,7 +147,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, peer string
}
})
if err != nil {
glog.V(0).Infof("subscribing remote %s meta change: %v", peer, err)
log.Infof("subscribing remote %s meta change: %v", peer, err)
time.Sleep(1733 * time.Millisecond)
}
}
@ -177,7 +177,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
value, err := f.Store.KvGet(context.Background(), key)
if err == ErrKvNotFound {
glog.Warningf("readOffset %s not found", peer)
log.Warnf("readOffset %s not found", peer)
return 0, nil
}
@ -187,7 +187,7 @@ func (ma *MetaAggregator) readOffset(f *Filer, peer string, peerSignature int32)
lastTsNs = int64(util.BytesToUint64(value))
glog.V(0).Infof("readOffset %s : %d", peer, lastTsNs)
log.Infof("readOffset %s : %d", peer, lastTsNs)
return
}
@ -206,7 +206,7 @@ func (ma *MetaAggregator) updateOffset(f *Filer, peer string, peerSignature int3
return fmt.Errorf("updateOffset %s : %v", peer, err)
}
glog.V(4).Infof("updateOffset %s : %d", peer, lastTsNs)
log.Tracef("updateOffset %s : %d", peer, lastTsNs)
return
}


@ -3,7 +3,7 @@ package filer
import (
"context"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -14,7 +14,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
var newEntry *Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(resp.Directory, message.OldEntry.Name)
glog.V(4).Infof("deleting %v", oldPath)
log.Tracef("deleting %v", oldPath)
if err := filerStore.DeleteEntry(context.Background(), oldPath); err != nil {
return err
}
@ -26,7 +26,7 @@ func Replay(filerStore FilerStore, resp *filer_pb.SubscribeMetadataResponse) err
dir = message.NewParentPath
}
key := util.NewFullPath(dir, message.NewEntry.Name)
glog.V(4).Infof("creating %v", key)
log.Tracef("creating %v", key)
newEntry = FromPbEntry(dir, message.NewEntry)
if err := filerStore.InsertEntry(context.Background(), newEntry); err != nil {
return err


@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"go.mongodb.org/mongo-driver/bson"
@ -134,7 +134,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
glog.Errorf("find %s: %v", fullpath, err)
log.Errorf("find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
@ -205,7 +205,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
log.Infof("list %s : %v", entry.FullPath, err)
break
}
@ -213,7 +213,7 @@ func (store *MongodbStore) ListDirectoryEntries(ctx context.Context, fullpath ut
}
if err := cur.Close(ctx); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
log.Infof("list iterator close: %v", err)
}
return entries, err


@ -4,7 +4,7 @@ import (
"context"
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
)
@ -36,7 +36,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
glog.Errorf("kv get: %v", err)
log.Errorf("kv get: %v", err)
return nil, filer.ErrKvNotFound
}


@ -7,7 +7,7 @@ import (
"math/rand"
"sync"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@ -54,7 +54,7 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
locations = resp.LocationsMap[vid]
if locations == nil || len(locations.Locations) == 0 {
glog.V(0).Infof("failed to locate %s", fileId)
log.Infof("failed to locate %s", fileId)
return fmt.Errorf("failed to locate %s", fileId)
}
vicCacheLock.Lock()
@ -101,7 +101,7 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
c.readerLock.Lock()
defer c.readerLock.Unlock()
glog.V(4).Infof("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
log.Tracef("ReadAt [%d,%d) of total file size %d bytes %d chunk views", offset, offset+int64(len(p)), c.fileSize, len(c.chunkViews))
return c.doReadAt(p[n:], offset+int64(n))
}
@ -121,7 +121,7 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
}
if startOffset < chunk.LogicOffset {
gap := int(chunk.LogicOffset - startOffset)
glog.V(4).Infof("zero [%d,%d)", startOffset, startOffset+int64(gap))
log.Tracef("zero [%d,%d)", startOffset, startOffset+int64(gap))
n += int(min(int64(gap), remaining))
startOffset, remaining = chunk.LogicOffset, remaining-int64(gap)
if remaining <= 0 {
@ -133,10 +133,10 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
if chunkStart >= chunkStop {
continue
}
glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
log.Tracef("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
if err != nil {
glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
log.Errorf("fetching chunk %+v: %v\n", chunk, err)
return
}
bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
@ -145,11 +145,11 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
}
glog.V(4).Infof("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
log.Tracef("doReadAt [%d,%d), n:%v, err:%v", offset, offset+int64(len(p)), n, err)
if err == nil && remaining > 0 && c.fileSize > startOffset {
delta := int(min(remaining, c.fileSize-startOffset))
glog.V(4).Infof("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
log.Tracef("zero2 [%d,%d) of file size %d bytes", startOffset, startOffset+int64(delta), c.fileSize)
n += delta
}
@ -194,11 +194,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
return c.fetchGroup.Do(chunkView.FileId, func() (interface{}, error) {
glog.V(4).Infof("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
log.Tracef("readFromWholeChunkData %s offset %d [%d,%d) size at least %d", chunkView.FileId, chunkView.Offset, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size), chunkView.ChunkSize)
data := c.chunkCache.GetChunk(chunkView.FileId, chunkView.ChunkSize)
if data != nil {
glog.V(4).Infof("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
log.Tracef("cache hit %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset-chunkView.Offset, chunkView.LogicOffset-chunkView.Offset+int64(len(data)))
} else {
var err error
data, err = c.doFetchFullChunkData(chunkView)
@ -213,11 +213,11 @@ func (c *ChunkReadAt) readOneWholeChunk(chunkView *ChunkView) (interface{}, erro
func (c *ChunkReadAt) doFetchFullChunkData(chunkView *ChunkView) ([]byte, error) {
glog.V(4).Infof("+ doFetchFullChunkData %s", chunkView.FileId)
log.Tracef("+ doFetchFullChunkData %s", chunkView.FileId)
data, err := fetchChunk(c.lookupFileId, chunkView.FileId, chunkView.CipherKey, chunkView.IsGzipped)
glog.V(4).Infof("- doFetchFullChunkData %s", chunkView.FileId)
log.Tracef("- doFetchFullChunkData %s", chunkView.FileId)
return data, err
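The fetchGroup.Do call in readOneWholeChunk above deduplicates concurrent fetches keyed by the chunk's file id, which matches the x/sync singleflight idiom (an assumption — the field's type does not appear in these hunks). A minimal illustration:

package main

import (
	"fmt"

	"golang.org/x/sync/singleflight"
)

// fetchChunkData is a hypothetical fetcher standing in for the real chunk read.
func fetchChunkData(fileId string) (interface{}, error) {
	return []byte("chunk bytes for " + fileId), nil
}

func main() {
	var g singleflight.Group
	// Concurrent callers with the same key share a single in-flight fetch,
	// so one hot chunk is read from the volume server only once.
	v, err, shared := g.Do("3,01637037d6", func() (interface{}, error) {
		return fetchChunkData("3,01637037d6")
	})
	fmt.Println(len(v.([]byte)), err, shared)
}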


@ -10,7 +10,7 @@ import (
"github.com/go-redis/redis"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -170,7 +170,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, full
path := util.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(ctx, path)
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
log.Infof("list %s : %v", path, err)
} else {
if entry.TtlSec > 0 {
if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {


@ -8,7 +8,7 @@ import (
"github.com/go-redis/redis"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -149,7 +149,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, ful
path := util.NewFullPath(string(fullpath), fileName)
entry, err := store.FindEntry(ctx, path)
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
log.Infof("list %s : %v", path, err)
} else {
if entry.TtlSec > 0 {
if entry.Attr.Crtime.Add(time.Duration(entry.TtlSec) * time.Second).Before(time.Now()) {


@ -7,7 +7,7 @@ import (
"math"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/wdclient"
@ -24,7 +24,7 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
urlStrings, err := masterClient.LookupFileId(chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
fileId2Url[chunkView.FileId] = urlStrings
@ -36,12 +36,12 @@ func StreamContent(masterClient *wdclient.MasterClient, w io.Writer, chunks []*f
data, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
if err != nil {
glog.Errorf("read chunk: %v", err)
log.Errorf("read chunk: %v", err)
return fmt.Errorf("read chunk: %v", err)
}
_, err = w.Write(data)
if err != nil {
glog.Errorf("write chunk: %v", err)
log.Errorf("write chunk: %v", err)
return fmt.Errorf("write chunk: %v", err)
}
}
@ -65,7 +65,7 @@ func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk)
for _, chunkView := range chunkViews {
urlStrings, err := lookupFileIdFn(chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return nil, err
}
@ -175,7 +175,7 @@ func (c *ChunkStreamReader) Seek(offset int64, whence int) (int64, error) {
func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
urlStrings, err := c.lookupFileId(chunkView.FileId)
if err != nil {
glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
log.Debugf("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
return err
}
var buffer bytes.Buffer
@ -188,7 +188,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
break
}
if err != nil {
glog.V(1).Infof("read %s failed, err: %v", chunkView.FileId, err)
log.Debugf("read %s failed, err: %v", chunkView.FileId, err)
buffer.Reset()
} else {
break
@ -201,7 +201,7 @@ func (c *ChunkStreamReader) fetchChunkToBuffer(chunkView *ChunkView) error {
c.bufferPos = 0
c.bufferOffset = chunkView.LogicOffset
// glog.V(0).Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
// log.Infof("read %s [%d,%d)", chunkView.FileId, chunkView.LogicOffset, chunkView.LogicOffset+int64(chunkView.Size))
return nil
}


@ -13,7 +13,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -48,12 +48,12 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
if dir.FullPath() == dir.wfs.option.FilerMountRootPath {
dir.setRootDirAttributes(attr)
glog.V(3).Infof("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
log.Tracef("root dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
if err := dir.maybeLoadEntry(); err != nil {
glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
log.Tracef("dir Attr %s,err: %+v", dir.FullPath(), err)
return err
}
@ -64,14 +64,14 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
attr.Gid = dir.entry.Attributes.Gid
attr.Uid = dir.entry.Attributes.Uid
glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
log.Tracef("dir Attr %s, attr: %+v", dir.FullPath(), attr)
return nil
}
func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
log.Tracef("dir Getxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
return err
@ -96,7 +96,7 @@ func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
glog.V(3).Infof("dir %s fsync %+v", dir.FullPath(), req)
log.Tracef("dir %s fsync %+v", dir.FullPath(), req)
return nil
}
@ -146,7 +146,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
OExcl: req.Flags&fuse.OpenExclusive != 0,
Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
log.Debugf("create %s/%s: %v", dir.FullPath(), req.Name, req.Flags)
if err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
@ -157,7 +157,7 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
if strings.Contains(err.Error(), "EEXIST") {
return fuse.EEXIST
}
glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
log.Infof("create %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.EIO
}
@ -182,21 +182,21 @@ func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
if req.Mode&os.ModeNamedPipe != 0 {
glog.V(1).Infof("mknod named pipe %s", req.String())
log.Debugf("mknod named pipe %s", req.String())
return nil, fuse.ENOSYS
}
if req.Mode&req.Mode&os.ModeSocket != 0 {
glog.V(1).Infof("mknod socket %s", req.String())
log.Debugf("mknod socket %s", req.String())
return nil, fuse.ENOSYS
}
// not going to support mknod for normal files either
glog.V(1).Infof("mknod %s", req.String())
log.Debugf("mknod %s", req.String())
return nil, fuse.ENOSYS
}
func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
log.Tracef("mkdir %s: %s", dir.FullPath(), req.Name)
newEntry := &filer_pb.Entry{
Name: req.Name,
@ -221,9 +221,9 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("mkdir: %v", request)
log.Debugf("mkdir: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return err
}
@ -238,20 +238,20 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
return node, nil
}
glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
log.Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
return nil, fuse.EIO
}
func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (node fs.Node, err error) {
glog.V(4).Infof("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
log.Tracef("dir Lookup %s: %s by %s", dir.FullPath(), req.Name, req.Header.String())
fullFilePath := util.NewFullPath(dir.FullPath(), req.Name)
dirPath := util.FullPath(dir.FullPath())
visitErr := meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath)
if visitErr != nil {
glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
log.Errorf("dir Lookup %s: %v", dirPath, visitErr)
return nil, fuse.EIO
}
cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
@ -261,14 +261,14 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
entry := cachedEntry.ToProtoEntry()
if entry == nil {
// glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
// log.Tracef("dir Lookup cache miss %s", fullFilePath)
entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
if err != nil {
glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
log.Debugf("dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
} else {
glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
log.Tracef("dir Lookup cache hit %s", fullFilePath)
}
if entry != nil {
@ -293,13 +293,13 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
return node, nil
}
glog.V(4).Infof("not found dir GetEntry %s: %v", fullFilePath, err)
log.Tracef("not found dir GetEntry %s: %v", fullFilePath, err)
return nil, fuse.ENOENT
}
func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
glog.V(4).Infof("dir ReadDirAll %s", dir.FullPath())
log.Tracef("dir ReadDirAll %s", dir.FullPath())
processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
fullpath := util.NewFullPath(dir.FullPath(), entry.Name)
@ -316,12 +316,12 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
dirPath := util.FullPath(dir.FullPath())
if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
glog.Errorf("dir ReadDirAll %s: %v", dirPath, err)
log.Errorf("dir ReadDirAll %s: %v", dirPath, err)
return nil, fuse.EIO
}
listedEntries, listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), util.FullPath(dir.FullPath()), "", false, int(math.MaxInt32))
if listErr != nil {
glog.Errorf("list meta cache: %v", listErr)
log.Errorf("list meta cache: %v", listErr)
return nil, fuse.EIO
}
for _, cachedEntry := range listedEntries {
@ -352,11 +352,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
}
// first, ensure the filer store can correctly delete
glog.V(3).Infof("remove file: %v", req)
log.Tracef("remove file: %v", req)
isDeleteData := entry.HardLinkCounter <= 1
err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
if err != nil {
glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
log.Tracef("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
return fuse.ENOENT
}
@ -389,11 +389,11 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
glog.V(3).Infof("remove directory entry: %v", req)
log.Tracef("remove directory entry: %v", req)
ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
if err != nil {
glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
log.Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
if strings.Contains(err.Error(), "non-empty") {
return fuse.EEXIST
}
@ -410,7 +410,7 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
log.Tracef("%v dir setattr %+v", dir.FullPath(), req)
if err := dir.maybeLoadEntry(); err != nil {
return err
@ -438,7 +438,7 @@ func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fus
func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
log.Tracef("dir Setxattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
return err
@ -454,7 +454,7 @@ func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
log.Tracef("dir Removexattr %s: %s", dir.FullPath(), req.Name)
if err := dir.maybeLoadEntry(); err != nil {
return err
@ -470,7 +470,7 @@ func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) e
func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
log.Tracef("dir Listxattr %s", dir.FullPath())
if err := dir.maybeLoadEntry(); err != nil {
return err
@ -485,7 +485,7 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
}
func (dir *Dir) Forget() {
glog.V(4).Infof("Forget dir %s", dir.FullPath())
log.Tracef("Forget dir %s", dir.FullPath())
dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
}
@ -517,10 +517,10 @@ func (dir *Dir) saveEntry() error {
Signatures: []int32{dir.wfs.signature},
}
glog.V(1).Infof("save dir entry: %v", request)
log.Debugf("save dir entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
log.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
return fuse.EIO
}


@ -8,7 +8,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
@ -26,10 +26,10 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
oldFile, ok := old.(*File)
if !ok {
glog.Errorf("old node is not a file: %+v", old)
log.Errorf("old node is not a file: %+v", old)
}
glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
log.Tracef("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
return nil, err
@ -69,13 +69,13 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.UpdateEntry(client, updateOldEntryRequest); err != nil {
glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
return fuse.EIO
}
dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(updateOldEntryRequest.Directory, updateOldEntryRequest.Entry))
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
log.Infof("Link %v/%v -> %s/%s: %v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName, err)
return fuse.EIO
}
dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
@ -96,7 +96,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
log.Tracef("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
request := &filer_pb.CreateEntryRequest{
Directory: dir.FullPath(),
@ -121,7 +121,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
log.Infof("symlink %s/%s: %v", dir.FullPath(), req.NewName, err)
return fuse.EIO
}
@ -147,7 +147,7 @@ func (file *File) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (stri
return "", fuse.Errno(syscall.EINVAL)
}
glog.V(4).Infof("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
log.Tracef("Readlink: %v/%v => %v", file.dir.FullPath(), file.Name, entry.Attributes.SymlinkTarget)
return entry.Attributes.SymlinkTarget, nil


@ -6,7 +6,7 @@ import (
"github.com/seaweedfs/fuse"
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -18,12 +18,12 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
oldPath := util.NewFullPath(dir.FullPath(), req.OldName)
glog.V(4).Infof("dir Rename %s => %s", oldPath, newPath)
log.Tracef("dir Rename %s => %s", oldPath, newPath)
// find local old entry
oldEntry, err := dir.wfs.metaCache.FindEntry(context.Background(), oldPath)
if err != nil {
glog.Errorf("dir Rename can not find source %s : %v", oldPath, err)
log.Errorf("dir Rename can not find source %s : %v", oldPath, err)
return fuse.ENOENT
}
@ -41,7 +41,7 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
_, err := client.AtomicRenameEntry(ctx, request)
if err != nil {
glog.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
log.Errorf("dir AtomicRenameEntry %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
@ -49,18 +49,18 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
})
if err != nil {
glog.V(0).Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
log.Infof("dir Rename %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
// TODO: replicate renaming logic on filer
if err := dir.wfs.metaCache.DeleteEntry(context.Background(), oldPath); err != nil {
glog.V(0).Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
log.Infof("dir Rename delete local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}
oldEntry.FullPath = newPath
if err := dir.wfs.metaCache.InsertEntry(context.Background(), oldEntry); err != nil {
glog.V(0).Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
log.Infof("dir Rename insert local %s => %s : %v", oldPath, newPath, err)
return fuse.EIO
}


@ -7,7 +7,7 @@ import (
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@ -41,7 +41,7 @@ func newDirtyPages(file *File) *ContinuousDirtyPages {
func (pages *ContinuousDirtyPages) AddPage(offset int64, data []byte) {
glog.V(4).Infof("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
log.Tracef("%s AddPage [%d,%d) of %d bytes", pages.f.fullpath(), offset, offset+int64(len(data)), pages.f.entry.Attributes.FileSize)
if len(data) > int(pages.f.wfs.option.ChunkSizeLimit) {
// this is more than what buffer can hold.
@ -111,7 +111,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
reader = io.LimitReader(reader, size)
chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
if err != nil {
glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
log.Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
pages.chunkSaveErrChan <- err
return
}
@ -120,7 +120,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
pages.chunkAddLock.Lock()
defer pages.chunkAddLock.Unlock()
pages.f.addChunks([]*filer_pb.FileChunk{chunk})
glog.V(3).Infof("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
log.Tracef("%s saveToStorage [%d,%d)", pages.f.fullpath(), offset, offset+size)
}
if pages.f.wfs.concurrentWriters != nil {

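saveToStorage above caps the reader at exactly size bytes with io.LimitReader before handing it to the chunk writer; a standalone illustration of that stdlib helper:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("0123456789")
	limited := io.LimitReader(r, 4) // yields at most 4 bytes, then io.EOF
	b, err := io.ReadAll(limited)
	fmt.Printf("%s %v\n", b, err) // 0123 <nil>
}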

@ -30,12 +30,12 @@ func (list *IntervalLinkedList) Size() int64 {
return list.Tail.Offset + list.Tail.Size - list.Head.Offset
}
func (list *IntervalLinkedList) addNodeToTail(node *IntervalNode) {
// glog.V(4).Infof("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
// log.Tracef("add to tail [%d,%d) + [%d,%d) => [%d,%d)", list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, node.Offset+node.Size, list.Head.Offset, node.Offset+node.Size)
list.Tail.Next = node
list.Tail = node
}
func (list *IntervalLinkedList) addNodeToHead(node *IntervalNode) {
// glog.V(4).Infof("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
// log.Tracef("add to head [%d,%d) + [%d,%d) => [%d,%d)", node.Offset, node.Offset+node.Size, list.Head.Offset, list.Tail.Offset+list.Tail.Size, node.Offset, list.Tail.Offset+list.Tail.Size)
node.Next = list.Head
list.Head = node
}
@ -46,7 +46,7 @@ func (list *IntervalLinkedList) ReadData(buf []byte, start, stop int64) {
nodeStart, nodeStop := max(start, t.Offset), min(stop, t.Offset+t.Size)
if nodeStart < nodeStop {
// glog.V(0).Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
// log.Infof("copying start=%d stop=%d t=[%d,%d) t.data=%d => bufSize=%d nodeStart=%d, nodeStop=%d", start, stop, t.Offset, t.Offset+t.Size, len(t.Data), len(buf), nodeStart, nodeStop)
copy(buf[nodeStart-start:], t.Data[nodeStart-t.Offset:nodeStop-t.Offset])
}
@ -144,7 +144,7 @@ func (c *ContinuousIntervals) AddInterval(data []byte, offset int64) {
}
if prevList != nil && nextList != nil {
// glog.V(4).Infof("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
// log.Tracef("connecting [%d,%d) + [%d,%d) => [%d,%d)", prevList.Head.Offset, prevList.Tail.Offset+prevList.Tail.Size, nextList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size, prevList.Head.Offset, nextList.Tail.Offset+nextList.Tail.Size)
prevList.Tail.Next = nextList.Head
prevList.Tail = nextList.Tail
c.removeList(nextList)


@ -11,7 +11,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -45,7 +45,7 @@ func (file *File) fullpath() util.FullPath {
func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
glog.V(4).Infof("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
log.Tracef("file Attr %s, open:%v, existing attr: %+v", file.fullpath(), file.isOpen, attr)
entry := file.entry
if file.isOpen <= 0 || entry == nil {
@ -60,7 +60,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
attr.Size = filer.FileSize(entry)
if file.isOpen > 0 {
attr.Size = entry.Attributes.FileSize
glog.V(4).Infof("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
log.Tracef("file Attr %s, open:%v, size: %d", file.fullpath(), file.isOpen, attr.Size)
}
attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
@ -78,7 +78,7 @@ func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
glog.V(4).Infof("file Getxattr %s", file.fullpath())
log.Tracef("file Getxattr %s", file.fullpath())
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@ -90,13 +90,13 @@ func (file *File) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp
func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (fs.Handle, error) {
glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
log.Tracef("file %v open %+v", file.fullpath(), req)
handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
resp.Handle = fuse.HandleID(handle.handle)
glog.V(4).Infof("%v file open handle id = %d", file.fullpath(), handle.handle)
log.Tracef("%v file open handle id = %d", file.fullpath(), handle.handle)
return handle, nil
@ -104,7 +104,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
log.Tracef("%v file setattr %+v", file.fullpath(), req)
_, err := file.maybeLoadEntry(ctx)
if err != nil {
@ -123,7 +123,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
if req.Valid.Size() {
glog.V(4).Infof("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
log.Tracef("%v file setattr set size=%v chunks=%d", file.fullpath(), req.Size, len(file.entry.Chunks))
if req.Size < filer.FileSize(file.entry) {
// fmt.Printf("truncate %v \n", fullPath)
var chunks []*filer_pb.FileChunk
@ -135,10 +135,10 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
int64Size = int64(req.Size) - chunk.Offset
if int64Size > 0 {
chunks = append(chunks, chunk)
glog.V(4).Infof("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
log.Tracef("truncated chunk %+v from %d to %d\n", chunk.GetFileIdString(), chunk.Size, int64Size)
chunk.Size = uint64(int64Size)
} else {
glog.V(4).Infof("truncated whole chunk %+v\n", chunk.GetFileIdString())
log.Tracef("truncated whole chunk %+v\n", chunk.GetFileIdString())
truncatedChunks = append(truncatedChunks, chunk)
}
}
@ -195,7 +195,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
log.Tracef("file Setxattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@ -212,7 +212,7 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
log.Tracef("file Removexattr %s: %s", file.fullpath(), req.Name)
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@ -229,7 +229,7 @@ func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest)
func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
glog.V(4).Infof("file Listxattr %s", file.fullpath())
log.Tracef("file Listxattr %s", file.fullpath())
entry, err := file.maybeLoadEntry(ctx)
if err != nil {
@ -247,14 +247,14 @@ func (file *File) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, res
func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
// fsync works at OS level
// write the file chunks to the filerGrpcAddress
glog.V(4).Infof("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
log.Tracef("%s/%s fsync file %+v", file.dir.FullPath(), file.Name, req)
return nil
}
func (file *File) Forget() {
t := util.NewFullPath(file.dir.FullPath(), file.Name)
glog.V(4).Infof("Forget file %s", t)
log.Tracef("Forget file %s", t)
file.wfs.fsNodeCache.DeleteFsNode(t)
}
@ -271,13 +271,13 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
}
entry, err = file.wfs.maybeLoadEntry(file.dir.FullPath(), file.Name)
if err != nil {
glog.V(3).Infof("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
log.Tracef("maybeLoadEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return entry, err
}
if entry != nil {
file.setEntry(entry)
} else {
glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
log.Warnf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
}
return entry, nil
}
@ -319,7 +319,7 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
file.reader = nil
glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
log.Tracef("%s existing %d chunks adds %d more", file.fullpath(), len(file.entry.Chunks), len(chunks))
file.entry.Chunks = append(file.entry.Chunks, newChunks...)
}
@ -348,10 +348,10 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
Signatures: []int32{file.wfs.signature},
}
glog.V(4).Infof("save file entry: %v", request)
log.Tracef("save file entry: %v", request)
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
glog.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
log.Errorf("UpdateEntry file %s/%s: %v", file.dir.FullPath(), file.Name, err)
return fuse.EIO
}

View File

@ -14,7 +14,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@ -57,7 +57,7 @@ var _ = fs.HandleReleaser(&FileHandle{})
func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
glog.V(4).Infof("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
log.Tracef("%s read fh %d: [%d,%d) size %d resp.Data cap=%d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, cap(resp.Data))
fh.RLock()
defer fh.RUnlock()
@ -82,12 +82,12 @@ func (fh *FileHandle) Read(ctx context.Context, req *fuse.ReadRequest, resp *fus
}
if err != nil {
glog.Warningf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
log.Warnf("file handle read %s %d: %v", fh.f.fullpath(), totalRead, err)
return fuse.EIO
}
if totalRead > int64(len(buff)) {
glog.Warningf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
log.Warnf("%s FileHandle Read %d: [%d,%d) size %d totalRead %d", fh.f.fullpath(), fh.handle, req.Offset, req.Offset+int64(req.Size), req.Size, totalRead)
totalRead = min(int64(len(buff)), totalRead)
}
// resp.Data = buff[:totalRead]
@ -106,7 +106,7 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
fileSize := int64(filer.FileSize(fh.f.entry))
if fileSize == 0 {
glog.V(1).Infof("empty fh %v", fh.f.fullpath())
log.Debugf("empty fh %v", fh.f.fullpath())
return 0, io.EOF
}
@ -127,10 +127,10 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
totalRead, err := fh.f.reader.ReadAt(buff, offset)
if err != nil && err != io.EOF {
glog.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
log.Errorf("file handle read %s: %v", fh.f.fullpath(), err)
}
glog.V(4).Infof("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
log.Tracef("file handle read %s [%d,%d] %d : %v", fh.f.fullpath(), offset, offset+int64(totalRead), totalRead, err)
return int64(totalRead), err
}
@ -150,7 +150,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
}
fh.f.entry.Attributes.FileSize = uint64(max(req.Offset+int64(len(data)), int64(fh.f.entry.Attributes.FileSize)))
glog.V(4).Infof("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
log.Tracef("%v write [%d,%d) %d", fh.f.fullpath(), req.Offset, req.Offset+int64(len(req.Data)), len(req.Data))
fh.dirtyPages.AddPage(req.Offset, data)
@ -169,7 +169,7 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
log.Tracef("Release %v fh %d", fh.f.fullpath(), fh.handle)
fh.Lock()
defer fh.Unlock()
@ -177,7 +177,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
fh.f.isOpen--
if fh.f.isOpen < 0 {
glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
log.Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
fh.f.isOpen = 0
return nil
}
@ -185,7 +185,7 @@ func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) err
if fh.f.isOpen == 0 {
if err := fh.doFlush(ctx, req.Header); err != nil {
glog.Errorf("Release doFlush %s: %v", fh.f.Name, err)
log.Errorf("Release doFlush %s: %v", fh.f.Name, err)
}
// stop the goroutine
@ -211,7 +211,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
// flush works at fh level
// send the data to the OS
glog.V(4).Infof("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
log.Tracef("doFlush %s fh %d", fh.f.fullpath(), fh.handle)
fh.dirtyPages.saveExistingPagesToStorage()
@ -250,9 +250,9 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
Signatures: []int32{fh.f.wfs.signature},
}
glog.V(4).Infof("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
log.Tracef("%s set chunks: %v", fh.f.fullpath(), len(fh.f.entry.Chunks))
for i, chunk := range fh.f.entry.Chunks {
glog.V(4).Infof("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
log.Tracef("%s chunks %d: %v [%d,%d)", fh.f.fullpath(), i, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
}
manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(fh.f.entry.Chunks)
@ -261,7 +261,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
if manifestErr != nil {
// not good, but should be ok
glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
log.Infof("MaybeManifestize: %v", manifestErr)
}
fh.f.entry.Chunks = append(chunks, manifestChunks...)
@ -269,7 +269,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
defer fh.f.wfs.mapPbIdFromFilerToLocal(request.Entry)
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
log.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
return fmt.Errorf("fh flush create %s: %v", fh.f.fullpath(), err)
}
@ -283,7 +283,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
}
if err != nil {
glog.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
log.Errorf("%v fh %d flush: %v", fh.f.fullpath(), fh.handle, err)
return fuse.EIO
}

View File

@ -8,7 +8,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/filer/leveldb"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/bounded_tree"
)
@ -44,7 +44,7 @@ func openMetaStore(dbFolder string) filer.VirtualFilerStore {
}
if err := store.Initialize(config, ""); err != nil {
glog.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
log.Fatalf("Failed to initialize metadata cache store for %s: %+v", store.GetName(), err)
}
return filer.NewFilerStoreWrapper(store)
@ -72,7 +72,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
// skip the unnecessary deletion
// leave the update to the following InsertEntry operation
} else {
glog.V(3).Infof("DeleteEntry %s/%s", oldPath, oldPath.Name())
log.Tracef("DeleteEntry %s/%s", oldPath, oldPath.Name())
if err := mc.localStore.DeleteEntry(ctx, oldPath); err != nil {
return err
}
@ -85,7 +85,7 @@ func (mc *MetaCache) AtomicUpdateEntryFromFiler(ctx context.Context, oldPath uti
if newEntry != nil {
newDir, _ := newEntry.DirAndName()
if mc.visitedBoundary.HasVisited(util.FullPath(newDir)) {
glog.V(3).Infof("InsertEntry %s/%s", newDir, newEntry.Name())
log.Tracef("InsertEntry %s/%s", newDir, newEntry.Name())
if err := mc.localStore.InsertEntry(ctx, newEntry); err != nil {
return err
}

View File

@ -5,7 +5,7 @@ import (
"fmt"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -14,13 +14,13 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
return mc.visitedBoundary.EnsureVisited(dirPath, func(path util.FullPath) (childDirectories []string, err error) {
glog.V(4).Infof("ReadDirAllEntries %s ...", path)
log.Tracef("ReadDirAllEntries %s ...", path)
util.Retry("ReadDirAllEntries", func() error {
err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
entry := filer.FromPbEntry(string(dirPath), pbEntry)
if err := mc.doInsertEntry(context.Background(), entry); err != nil {
glog.V(0).Infof("read %s: %v", entry.FullPath, err)
log.Infof("read %s: %v", entry.FullPath, err)
return err
}
if entry.IsDirectory() {

View File

@ -7,7 +7,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -28,7 +28,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
var newEntry *filer.Entry
if message.OldEntry != nil {
oldPath = util.NewFullPath(dir, message.OldEntry.Name)
glog.V(4).Infof("deleting %v", oldPath)
log.Tracef("deleting %v", oldPath)
}
if message.NewEntry != nil {
@ -36,7 +36,7 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
dir = message.NewParentPath
}
key := util.NewFullPath(dir, message.NewEntry.Name)
glog.V(4).Infof("creating %v", key)
log.Tracef("creating %v", key)
newEntry = filer.FromPbEntry(dir, message.NewEntry)
}
err := mc.AtomicUpdateEntryFromFiler(context.Background(), oldPath, newEntry)
@ -73,13 +73,13 @@ func SubscribeMetaEvents(mc *MetaCache, selfSignature int32, client filer_pb.Fil
}
if err := processEventFn(resp); err != nil {
glog.Fatalf("process %v: %v", resp, err)
log.Fatalf("process %v: %v", resp, err)
}
lastTsNs = resp.TsNs
}
})
if err != nil {
glog.Errorf("subscribing filer meta change: %v", err)
log.Errorf("subscribing filer meta change: %v", err)
}
time.Sleep(time.Second)
}
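
Aside, not part of the commit: the tail of this hunk is the resubscribe loop the new logger participates in — a fatal exit for per-event failures, an Errorf plus a one-second pause before redialing when the stream breaks. A sketch of that shape, where subscribe() is a hypothetical stand-in for the gRPC subscription driven above:

package main

import (
	"errors"
	"time"

	"github.com/chrislusf/seaweedfs/weed/util/log" // import path as added throughout this diff
)

// subscribe stands in for the metadata subscription stream; a real run
// would block inside it until the stream breaks.
func subscribe() error {
	return errors.New("stream closed")
}

func main() {
	for {
		if err := subscribe(); err != nil {
			log.Errorf("subscribing filer meta change: %v", err)
		}
		time.Sleep(time.Second) // same backoff as the hunk above
	}
}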

View File

@ -17,7 +17,7 @@ import (
"github.com/seaweedfs/fuse/fs"
"github.com/chrislusf/seaweedfs/weed/filesys/meta_cache"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/chrislusf/seaweedfs/weed/util/chunk_cache"
@ -128,7 +128,7 @@ func (wfs *WFS) Root() (fs.Node, error) {
func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
fullpath := file.fullpath()
glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
log.Tracef("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
@ -156,7 +156,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
wfs.handlesLock.Lock()
defer wfs.handlesLock.Unlock()
glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
log.Tracef("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
delete(wfs.handles, fullpath.AsInode())
@ -166,7 +166,7 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
// Statfs is called to obtain file system metadata. Implements fuse.FSStatfser
func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {
glog.V(4).Infof("reading fs stats: %+v", req)
log.Tracef("reading fs stats: %+v", req)
if wfs.stats.lastChecked < time.Now().Unix()-20 {
@ -178,13 +178,13 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
Ttl: fmt.Sprintf("%ds", wfs.option.TtlSec),
}
glog.V(4).Infof("reading filer stats: %+v", request)
log.Tracef("reading filer stats: %+v", request)
resp, err := client.Statistics(context.Background(), request)
if err != nil {
glog.V(0).Infof("reading filer stats %v: %v", request, err)
log.Infof("reading filer stats %v: %v", request, err)
return err
}
glog.V(4).Infof("read filer stats: %+v", resp)
log.Tracef("read filer stats: %+v", resp)
wfs.stats.TotalSize = resp.TotalSize
wfs.stats.UsedSize = resp.UsedSize
@ -194,7 +194,7 @@ func (wfs *WFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.
return nil
})
if err != nil {
glog.V(0).Infof("filer Statistics: %v", err)
log.Infof("filer Statistics: %v", err)
return err
}
}

View File

@ -6,7 +6,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
)
@ -24,7 +24,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
}
dataChunks, manifestResolveErr := filer.ResolveOneChunkManifest(filer.LookupFn(wfs), chunk)
if manifestResolveErr != nil {
glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
log.Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
fileIds = append(fileIds, dChunk.GetFileIdString())
@ -49,7 +49,7 @@ func (wfs *WFS) deleteFileIds(grpcDialOption grpc.DialOption, client filer_pb.Se
m := make(map[string]operation.LookupResult)
glog.V(4).Infof("deleteFileIds lookup volume id locations: %v", vids)
log.Tracef("deleteFileIds lookup volume id locations: %v", vids)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: vids,
})

View File

@ -6,7 +6,7 @@ import (
"io"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
@ -32,7 +32,7 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@ -55,11 +55,11 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
uploadResult, err, data := operation.Upload(fileUrl, filename, wfs.option.Cipher, reader, false, "", nil, auth)
if err != nil {
glog.V(0).Infof("upload data %v to %s: %v", filename, fileUrl, err)
log.Infof("upload data %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
}

View File

@ -111,7 +111,7 @@ func listxattr(entry *filer_pb.Entry, req *fuse.ListxattrRequest, resp *fuse.Lis
func (wfs *WFS) maybeLoadEntry(dir, name string) (entry *filer_pb.Entry, err error) {
fullpath := util.NewFullPath(dir, name)
// glog.V(3).Infof("read entry cache miss %s", fullpath)
// log.Tracef("read entry cache miss %s", fullpath)
// read from async meta cache
meta_cache.EnsureVisited(wfs.metaCache, wfs, util.FullPath(dir))

View File

@ -24,7 +24,7 @@ The comment from glog.go introduces the ideas:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
log.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:

View File

@ -22,7 +22,7 @@
//
// glog.Info("Prepare to repel boarders")
//
// glog.Fatalf("Initialization failed: %s", err)
// log.Fatalf("Initialization failed: %s", err)
//
// See the documentation for the V function for an explanation of these examples:
//
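
Aside, not part of the commit: taken together, the hunks in this change apply a consistent level mapping rather than a mechanical rename. A minimal sketch, assuming only the calls that actually appear in these diffs:

package main

import "github.com/chrislusf/seaweedfs/weed/util/log" // new import path used throughout

func main() {
	// Mapping observed across the hunks of this commit:
	log.Tracef("was glog.V(3)/glog.V(4): %s", "per-request detail")
	log.Debugf("was glog.V(1)/glog.V(2): %s", "occasional diagnostics")
	log.Infof("was glog.V(0).Infof: %s", "operator-visible events")
	log.Warnf("was glog.Warningf: %s", "recoverable problems")
	log.Errorf("was glog.Errorf: %s", "failed operations")
	// glog.Fatalf becomes log.Fatalf and still exits the process.
}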

View File

@ -10,7 +10,7 @@ import (
"github.com/disintegration/imaging"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
)
func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (resized io.ReadSeeker, w int, h int) {
@ -50,7 +50,7 @@ func Resized(ext string, read io.ReadSeeker, width, height int, mode string) (re
}
return bytes.NewReader(buf.Bytes()), dstImage.Bounds().Dx(), dstImage.Bounds().Dy()
} else {
glog.Error(err)
log.Error(err)
}
return read, 0, 0
}

View File

@ -5,7 +5,7 @@ import (
"fmt"
"io"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@ -34,7 +34,7 @@ func (broker *MessageBroker) appendToFile(targetFile string, topicConfig *messag
_, err := client.AppendToEntry(context.Background(), request)
if err != nil {
glog.V(0).Infof("append to file %v: %v", request, err)
log.Infof("append to file %v: %v", request, err)
return err
}
@ -61,7 +61,7 @@ func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConf
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@ -98,7 +98,7 @@ func (broker *MessageBroker) WithFilerClient(fn func(filer_pb.SeaweedFilerClient
if err == io.EOF {
return
}
glog.V(0).Infof("fail to connect to %s: %v", filer, err)
log.Infof("fail to connect to %s: %v", filer, err)
} else {
break
}

View File

@ -5,7 +5,7 @@ import (
"fmt"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
@ -78,11 +78,11 @@ func (broker *MessageBroker) checkFilers() {
found = true
break
}
glog.V(0).Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
log.Infof("failed to read masters from %+v: %v", broker.option.Filers, err)
time.Sleep(time.Second)
}
}
glog.V(0).Infof("received master list: %s", masters)
log.Infof("received master list: %s", masters)
// contact each masters for filers
var filers []string
@ -105,11 +105,11 @@ func (broker *MessageBroker) checkFilers() {
found = true
break
}
glog.V(0).Infof("failed to list filers: %v", err)
log.Infof("failed to list filers: %v", err)
time.Sleep(time.Second)
}
}
glog.V(0).Infof("received filer list: %s", filers)
log.Infof("received filer list: %s", filers)
broker.option.Filers = filers

View File

@ -8,7 +8,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
@ -65,7 +65,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
for {
// println("recv")
in, err := stream.Recv()
// glog.V(0).Infof("recieved %v err: %v", in, err)
// log.Infof("recieved %v err: %v", in, err)
if err == io.EOF {
return nil
}
@ -81,7 +81,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
data, err := proto.Marshal(in.Data)
if err != nil {
glog.Errorf("marshall error: %v\n", err)
log.Errorf("marshall error: %v\n", err)
continue
}
@ -97,7 +97,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
}
if err := broker.appendToFile(tpDir+"/"+md5File, topicConfig, md5hash.Sum(nil)); err != nil {
glog.V(0).Infof("err writing %s: %v", md5File, err)
log.Infof("err writing %s: %v", md5File, err)
}
// fmt.Printf("received md5 %X\n", md5hash.Sum(nil))
@ -105,7 +105,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
// send the close ack
// println("server send ack closing")
if err := stream.Send(&messaging_pb.PublishResponse{IsClosed: true}); err != nil {
glog.V(0).Infof("err sending close response: %v", err)
log.Infof("err sending close response: %v", err)
}
return nil

View File

@ -10,7 +10,7 @@ import (
"github.com/golang/protobuf/proto"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
)
@ -76,7 +76,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
Data: m,
})
if err != nil {
glog.V(0).Infof("=> subscriber %v: %+v", subscriberId, err)
log.Infof("=> subscriber %v: %+v", subscriberId, err)
}
return err
}
@ -84,12 +84,12 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
eachLogEntryFn := func(logEntry *filer_pb.LogEntry) error {
m := &messaging_pb.Message{}
if err = proto.Unmarshal(logEntry.Data, m); err != nil {
glog.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
log.Errorf("unexpected unmarshal messaging_pb.Message: %v", err)
return err
}
// fmt.Printf("sending : %d bytes ts %d\n", len(m.Value), logEntry.TsNs)
if err = eachMessageFn(m); err != nil {
glog.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
log.Errorf("sending %d bytes to %s: %s", len(m.Value), subscriberId, err)
return err
}
if m.IsClose {
@ -122,7 +122,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
return isConnected
}, eachLogEntryFn)
if err != nil {
glog.Errorf("processed to %v: %v", lastReadTime, err)
log.Errorf("processed to %v: %v", lastReadTime, err)
time.Sleep(3127 * time.Millisecond)
if err != log_buffer.ResumeError {
break

View File

@ -6,7 +6,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@ -52,7 +52,7 @@ func (broker *MessageBroker) keepConnectedToOneFiler() {
defer cancel()
stream, err := client.KeepConnected(ctx)
if err != nil {
glog.V(0).Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
log.Infof("%s:%d failed to keep connected to %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
@ -67,24 +67,24 @@ func (broker *MessageBroker) keepConnectedToOneFiler() {
Name: broker.option.Ip,
GrpcPort: uint32(broker.option.Port),
}); err != nil {
glog.V(0).Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
log.Infof("broker %s:%d failed to init at %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
// TODO send events of adding/removing topics
glog.V(0).Infof("conntected with filer: %v", filer)
log.Infof("conntected with filer: %v", filer)
for {
if err := stream.Send(&filer_pb.KeepConnectedRequest{
Name: broker.option.Ip,
GrpcPort: uint32(broker.option.Port),
}); err != nil {
glog.V(0).Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
log.Infof("%s:%d failed to sendto %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
// println("send heartbeat")
if _, err := stream.Recv(); err != nil {
glog.V(0).Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
log.Infof("%s:%d failed to receive from %s: %v", broker.option.Ip, broker.option.Port, filer, err)
return err
}
// println("received reply")

View File

@ -6,7 +6,7 @@ import (
"time"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
"github.com/chrislusf/seaweedfs/weed/util/log_buffer"
)
@ -65,7 +65,7 @@ func (tm *TopicManager) buildLogBuffer(tl *TopicControl, tp TopicPartition, topi
)
if err := tm.broker.appendToFile(targetFile, topicConfig, buf); err != nil {
glog.V(0).Infof("log write failed %s: %v", targetFile, err)
log.Infof("log write failed %s: %v", targetFile, err)
}
}
logBuffer := log_buffer.NewLogBuffer(time.Minute, flushFn, func() {

View File

@ -8,7 +8,7 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -28,8 +28,8 @@ func (k *AwsSqsPub) GetName() string {
}
func (k *AwsSqsPub) Initialize(configuration util.Configuration, prefix string) (err error) {
glog.V(0).Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
glog.V(0).Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
log.Infof("filer.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
log.Infof("filer.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),

View File

@ -1,7 +1,7 @@
package notification
import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
"github.com/spf13/viper"
@ -32,11 +32,11 @@ func LoadConfiguration(config *viper.Viper, prefix string) {
for _, queue := range MessageQueues {
if config.GetBool(prefix + queue.GetName() + ".enabled") {
if err := queue.Initialize(config, prefix+queue.GetName()+"."); err != nil {
glog.Fatalf("Failed to initialize notification for %s: %+v",
log.Fatalf("Failed to initialize notification for %s: %+v",
queue.GetName(), err)
}
Queue = queue
glog.V(0).Infof("Configure notification message queue for %s", queue.GetName())
log.Infof("Configure notification message queue for %s", queue.GetName())
return
}
}
@ -50,7 +50,7 @@ func validateOneEnabledQueue(config *viper.Viper) {
if enabledQueue == "" {
enabledQueue = queue.GetName()
} else {
glog.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName())
log.Fatalf("Notification message queue is enabled for both %s and %s", enabledQueue, queue.GetName())
}
}
}

View File

@ -22,7 +22,7 @@ import (
"gocloud.dev/pubsub"
_ "gocloud.dev/pubsub/awssnssqs"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
// _ "gocloud.dev/pubsub/azuresb"
@ -46,10 +46,10 @@ func (k *GoCDKPubSub) GetName() string {
func (k *GoCDKPubSub) Initialize(configuration util.Configuration, prefix string) error {
k.topicURL = configuration.GetString(prefix + "topic_url")
glog.V(0).Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
log.Infof("notification.gocdk_pub_sub.topic_url: %v", k.topicURL)
topic, err := pubsub.OpenTopic(context.Background(), k.topicURL)
if err != nil {
glog.Fatalf("Failed to open topic: %v", err)
log.Fatalf("Failed to open topic: %v", err)
}
k.topic = topic
return nil

View File

@ -6,7 +6,7 @@ import (
"os"
"cloud.google.com/go/pubsub"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -26,8 +26,8 @@ func (k *GooglePubSub) GetName() string {
}
func (k *GooglePubSub) Initialize(configuration util.Configuration, prefix string) (err error) {
glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
log.Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
log.Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetString(prefix+"google_application_credentials"),
configuration.GetString(prefix+"project_id"),
@ -43,13 +43,13 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top
var found bool
google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !found {
glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
}
}
client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials))
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
log.Fatalf("Failed to create client: %v", err)
}
k.topic = client.Topic(topicName)
@ -57,11 +57,11 @@ func (k *GooglePubSub) initialize(google_application_credentials, projectId, top
if !exists {
k.topic, err = client.CreateTopic(ctx, topicName)
if err != nil {
glog.Fatalf("Failed to create topic %s: %v", topicName, err)
log.Fatalf("Failed to create topic %s: %v", topicName, err)
}
}
} else {
glog.Fatalf("Failed to check topic %s: %v", topicName, err)
log.Fatalf("Failed to check topic %s: %v", topicName, err)
}
return nil

View File

@ -2,7 +2,7 @@ package kafka
import (
"github.com/Shopify/sarama"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -22,8 +22,8 @@ func (k *KafkaQueue) GetName() string {
}
func (k *KafkaQueue) Initialize(configuration util.Configuration, prefix string) (err error) {
glog.V(0).Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
glog.V(0).Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
log.Infof("filer.notification.kafka.hosts: %v\n", configuration.GetStringSlice(prefix+"hosts"))
log.Infof("filer.notification.kafka.topic: %v\n", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetStringSlice(prefix+"hosts"),
configuration.GetString(prefix+"topic"),
@ -67,7 +67,7 @@ func (k *KafkaQueue) handleSuccess() {
for {
pm := <-k.producer.Successes()
if pm != nil {
glog.V(3).Infof("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
log.Tracef("producer message success, partition:%d offset:%d key:%v", pm.Partition, pm.Offset, pm.Key)
}
}
}
@ -76,7 +76,7 @@ func (k *KafkaQueue) handleError() {
for {
err := <-k.producer.Errors()
if err != nil {
glog.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
log.Errorf("producer message error, partition:%d offset:%d key:%v value:%s error(%v) topic:%s", err.Msg.Partition, err.Msg.Offset, err.Msg.Key, err.Msg.Value, err.Err, k.topic)
}
}
}

View File

@ -1,7 +1,7 @@
package kafka
import (
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/notification"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -24,6 +24,6 @@ func (k *LogQueue) Initialize(configuration util.Configuration, prefix string) (
func (k *LogQueue) SendMessage(key string, message proto.Message) (err error) {
glog.V(0).Infof("%v: %+v", key, message)
log.Infof("%v: %+v", key, message)
return nil
}

View File

@ -12,7 +12,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -57,7 +57,7 @@ func LoadChunkManifest(buffer []byte, isCompressed bool) (*ChunkManifest, error)
if isCompressed {
var err error
if buffer, err = util.DecompressData(buffer); err != nil {
glog.V(0).Infof("fail to decompress chunk manifest: %v", err)
log.Infof("fail to decompress chunk manifest: %v", err)
}
}
cm := ChunkManifest{}
@ -79,12 +79,12 @@ func (cm *ChunkManifest) DeleteChunks(master string, usePublicUrl bool, grpcDial
}
results, err := DeleteFiles(master, usePublicUrl, grpcDialOption, fileIds)
if err != nil {
glog.V(0).Infof("delete %+v: %v", fileIds, err)
log.Infof("delete %+v: %v", fileIds, err)
return fmt.Errorf("chunk delete: %v", err)
}
for _, result := range results {
if result.Error != "" {
glog.V(0).Infof("delete file %+v: %v", result.FileId, result.Error)
log.Infof("delete file %+v: %v", result.FileId, result.Error)
return fmt.Errorf("chunk delete %v: %v", result.FileId, result.Error)
}
}

View File

@ -7,7 +7,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
@ -32,7 +32,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
sepIndex := strings.LastIndex(volumeServer, ":")
port, err := strconv.Atoi(volumeServer[sepIndex+1:])
if err != nil {
glog.Errorf("failed to parse volume server address: %v", volumeServer)
log.Errorf("failed to parse volume server address: %v", volumeServer)
return "", err
}
return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil

View File

@ -6,7 +6,7 @@ import (
"sync"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
)
var ErrorNotFound = errors.New("not found")
@ -23,7 +23,7 @@ type VidCache struct {
func (vc *VidCache) Get(vid string) ([]Location, error) {
id, err := strconv.Atoi(vid)
if err != nil {
glog.V(1).Infof("Unknown volume id %s", vid)
log.Debugf("Unknown volume id %s", vid)
return nil, err
}
vc.RLock()
@ -42,7 +42,7 @@ func (vc *VidCache) Get(vid string) ([]Location, error) {
func (vc *VidCache) Set(vid string, locations []Location, duration time.Duration) {
id, err := strconv.Atoi(vid)
if err != nil {
glog.V(1).Infof("Unknown volume id %s", vid)
log.Debugf("Unknown volume id %s", vid)
return
}
vc.Lock()

View File

@ -11,7 +11,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/security"
)
@ -91,14 +91,14 @@ func NewFileParts(fullPathFilenames []string) (ret []FilePart, err error) {
func newFilePart(fullPathFilename string) (ret FilePart, err error) {
fh, openErr := os.Open(fullPathFilename)
if openErr != nil {
glog.V(0).Info("Failed to open file: ", fullPathFilename)
log.Info("Failed to open file: ", fullPathFilename)
return ret, openErr
}
ret.Reader = fh
fi, fiErr := fh.Stat()
if fiErr != nil {
glog.V(0).Info("Failed to stat file:", fullPathFilename)
log.Info("Failed to stat file:", fullPathFilename)
return ret, fiErr
}
ret.ModTime = fi.ModTime().UTC().Unix()
@ -210,7 +210,7 @@ func (fi FilePart) Upload(maxMB int, master string, usePublicUrl bool, jwt secur
func upload_one_chunk(filename string, reader io.Reader, master,
fileUrl string, jwt security.EncodedJwt,
) (size uint32, e error) {
glog.V(4).Info("Uploading part ", filename, " to ", fileUrl, "...")
log.Trace("Uploading part ", filename, " to ", fileUrl, "...")
uploadResult, uploadError, _ := Upload(fileUrl, filename, false, reader, false, "", nil, jwt)
if uploadError != nil {
return 0, uploadError
@ -223,7 +223,7 @@ func upload_chunked_file_manifest(fileUrl string, manifest *ChunkManifest, jwt s
if e != nil {
return e
}
glog.V(4).Info("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
log.Trace("Uploading chunks manifest ", manifest.Name, " to ", fileUrl, "...")
u, _ := url.Parse(fileUrl)
q := u.Query()
q.Set("cm", "true")

View File

@ -15,7 +15,7 @@ import (
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/util"
@ -97,7 +97,7 @@ func retriedUploadData(uploadUrl string, filename string, cipher bool, data []by
if err == nil {
return
} else {
glog.Warningf("uploading to %s: %v", uploadUrl, err)
log.Warnf("uploading to %s: %v", uploadUrl, err)
}
}
return
@ -203,22 +203,22 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
file_writer, cp_err := body_writer.CreatePart(h)
if cp_err != nil {
glog.V(0).Infoln("error creating form file", cp_err.Error())
log.Infoln("error creating form file", cp_err.Error())
return nil, cp_err
}
if err := fillBufferFunction(file_writer); err != nil {
glog.V(0).Infoln("error copying data", err)
log.Infoln("error copying data", err)
return nil, err
}
content_type := body_writer.FormDataContentType()
if err := body_writer.Close(); err != nil {
glog.V(0).Infoln("error closing body", err)
log.Infoln("error closing body", err)
return nil, err
}
req, postErr := http.NewRequest("POST", uploadUrl, bytes.NewReader(buf.Bytes()))
if postErr != nil {
glog.V(1).Infof("create upload request %s: %v", uploadUrl, postErr)
log.Debugf("create upload request %s: %v", uploadUrl, postErr)
return nil, fmt.Errorf("create upload request %s: %v", uploadUrl, postErr)
}
req.Header.Set("Content-Type", content_type)
@ -231,7 +231,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
// print("+")
resp, post_err := HttpClient.Do(req)
if post_err != nil {
glog.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
log.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
debug.PrintStack()
return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
}
@ -252,7 +252,7 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
log.Errorf("unmarshal %s: %v", uploadUrl, string(resp_body))
return nil, fmt.Errorf("unmarshal %v: %v", uploadUrl, unmarshal_err)
}
if ret.Error != "" {

View File

@ -10,7 +10,7 @@ import (
"strings"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -35,18 +35,18 @@ func GetEntry(filerClient FilerClient, fullFilePath util.FullPath) (entry *Entry
Name: name,
}
// glog.V(3).Infof("read %s request: %v", fullFilePath, request)
// log.Tracef("read %s request: %v", fullFilePath, request)
resp, err := LookupEntry(client, request)
if err != nil {
if err == ErrNotFound {
return nil
}
glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
log.Tracef("read %s %v: %v", fullFilePath, resp, err)
return err
}
if resp.Entry == nil {
// glog.V(3).Infof("read %s entry: %v", fullFilePath, entry)
// log.Tracef("read %s entry: %v", fullFilePath, entry)
return nil
}
@ -83,7 +83,7 @@ func doList(filerClient FilerClient, fullDirPath util.FullPath, prefix string, f
InclusiveStartFrom: inclusive,
}
glog.V(4).Infof("read directory: %v", request)
log.Tracef("read directory: %v", request)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
stream, err := client.ListEntries(ctx, request)
@ -130,14 +130,14 @@ func Exists(filerClient FilerClient, parentDirectoryPath string, entryName strin
Name: entryName,
}
glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
log.Tracef("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
resp, err := LookupEntry(client, request)
if err != nil {
if err == ErrNotFound {
exists = false
return nil
}
glog.V(0).Infof("exists entry %v: %v", request, err)
log.Infof("exists entry %v: %v", request, err)
return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
}
@ -173,9 +173,9 @@ func Mkdir(filerClient FilerClient, parentDirectoryPath string, dirName string,
Entry: entry,
}
glog.V(1).Infof("mkdir: %v", request)
log.Debugf("mkdir: %v", request)
if err := CreateEntry(client, request); err != nil {
glog.V(0).Infof("mkdir %v: %v", request, err)
log.Infof("mkdir %v: %v", request, err)
return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
}
@ -204,9 +204,9 @@ func MkFile(filerClient FilerClient, parentDirectoryPath string, fileName string
Entry: entry,
}
glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
log.Debugf("create file: %s/%s", parentDirectoryPath, fileName)
if err := CreateEntry(client, request); err != nil {
glog.V(0).Infof("create file %v:%v", request, err)
log.Infof("create file %v:%v", request, err)
return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
}

View File

@ -6,7 +6,7 @@ import (
"fmt"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/storage/needle"
"github.com/golang/protobuf/proto"
"github.com/viant/ptrie"
@ -88,11 +88,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
resp, err := client.CreateEntry(context.Background(), request)
if err != nil {
glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
log.Debugf("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
return fmt.Errorf("CreateEntry: %v", err)
}
if resp.Error != "" {
glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
log.Debugf("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
return fmt.Errorf("CreateEntry : %v", resp.Error)
}
return nil
@ -101,7 +101,7 @@ func CreateEntry(client SeaweedFilerClient, request *CreateEntryRequest) error {
func UpdateEntry(client SeaweedFilerClient, request *UpdateEntryRequest) error {
_, err := client.UpdateEntry(context.Background(), request)
if err != nil {
glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
log.Debugf("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
return fmt.Errorf("UpdateEntry: %v", err)
}
return nil
@ -113,7 +113,7 @@ func LookupEntry(client SeaweedFilerClient, request *LookupDirectoryEntryRequest
if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
return nil, ErrNotFound
}
glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
log.Tracef("read %s/%v: %v", request.Directory, request.Name, err)
return nil, fmt.Errorf("LookupEntry1: %v", err)
}
if resp.Entry == nil {

View File

@ -10,7 +10,7 @@ import (
"github.com/golang/protobuf/jsonpb"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
)
@ -19,28 +19,28 @@ func MaybeLoadVolumeInfo(fileName string) (*volume_server_pb.VolumeInfo, bool, e
volumeInfo := &volume_server_pb.VolumeInfo{}
glog.V(1).Infof("maybeLoadVolumeInfo checks %s", fileName)
log.Debugf("maybeLoadVolumeInfo checks %s", fileName)
if exists, canRead, _, _, _ := util.CheckFile(fileName); !exists || !canRead {
if !exists {
return volumeInfo, false, nil
}
if !canRead {
glog.Warningf("can not read %s", fileName)
log.Warnf("can not read %s", fileName)
return volumeInfo, false, fmt.Errorf("can not read %s", fileName)
}
return volumeInfo, false, nil
}
glog.V(1).Infof("maybeLoadVolumeInfo reads %s", fileName)
log.Debugf("maybeLoadVolumeInfo reads %s", fileName)
tierData, readErr := ioutil.ReadFile(fileName)
if readErr != nil {
glog.Warningf("fail to read %s : %v", fileName, readErr)
log.Warnf("fail to read %s : %v", fileName, readErr)
return volumeInfo, false, fmt.Errorf("fail to read %s : %v", fileName, readErr)
}
glog.V(1).Infof("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
log.Debugf("maybeLoadVolumeInfo Unmarshal volume info %v", fileName)
if err := jsonpb.Unmarshal(bytes.NewReader(tierData), volumeInfo); err != nil {
glog.Warningf("unmarshal error: %v", err)
log.Warnf("unmarshal error: %v", err)
return volumeInfo, false, fmt.Errorf("unmarshal error: %v", err)
}

View File

@ -2,7 +2,7 @@ package repl_util
import (
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/replication/source"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -23,9 +23,9 @@ func CopyFromChunkViews(chunkViews []*filer.ChunkView, filerSource *source.Filer
writeErr = writeFunc(data)
})
if err != nil {
glog.V(1).Infof("read from %s: %v", fileUrl, err)
log.Debugf("read from %s: %v", fileUrl, err)
} else if writeErr != nil {
glog.V(1).Infof("copy from %s: %v", fileUrl, writeErr)
log.Debugf("copy from %s: %v", fileUrl, writeErr)
} else {
break
}

View File

@ -7,7 +7,7 @@ import (
"google.golang.org/grpc"
"strings"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@ -37,28 +37,28 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
return nil
}
if !strings.HasPrefix(key, r.source.Dir) {
glog.V(4).Infof("skipping %v outside of %v", key, r.source.Dir)
log.Tracef("skipping %v outside of %v", key, r.source.Dir)
return nil
}
newKey := util.Join(r.sink.GetSinkToDirectory(), key[len(r.source.Dir):])
glog.V(3).Infof("replicate %s => %s", key, newKey)
log.Tracef("replicate %s => %s", key, newKey)
key = newKey
if message.OldEntry != nil && message.NewEntry == nil {
glog.V(4).Infof("deleting %v", key)
log.Tracef("deleting %v", key)
return r.sink.DeleteEntry(key, message.OldEntry.IsDirectory, message.DeleteChunks, message.Signatures)
}
if message.OldEntry == nil && message.NewEntry != nil {
glog.V(4).Infof("creating %v", key)
log.Tracef("creating %v", key)
return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
}
if message.OldEntry == nil && message.NewEntry == nil {
glog.V(0).Infof("weird message %+v", message)
log.Infof("weird message %+v", message)
return nil
}
foundExisting, err := r.sink.UpdateEntry(key, message.OldEntry, message.NewParentPath, message.NewEntry, message.DeleteChunks, message.Signatures)
if foundExisting {
glog.V(4).Infof("updated %v", key)
log.Tracef("updated %v", key)
return err
}
@ -67,7 +67,7 @@ func (r *Replicator) Replicate(ctx context.Context, key string, message *filer_p
return fmt.Errorf("delete old entry %v: %v", key, err)
}
glog.V(4).Infof("creating missing %v", key)
log.Tracef("creating missing %v", key)
return r.sink.CreateEntry(key, message.NewEntry, message.Signatures)
}

View File

@ -10,7 +10,7 @@ import (
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@ -56,7 +56,7 @@ func (g *AzureSink) initialize(accountName, accountKey, container, dir string) e
// Use your Storage account's name and key to create a credential object.
credential, err := azblob.NewSharedKeyCredential(accountName, accountKey)
if err != nil {
glog.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
log.Fatalf("failed to create Azure credential with account name:%s key:%s", accountName, accountKey)
}
// Create a request pipeline that is used to process HTTP(S) requests and responses.

View File

@ -8,7 +8,7 @@ import (
"google.golang.org/grpc"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/operation"
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
@ -82,7 +82,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
resp, err := client.AssignVolume(context.Background(), request)
if err != nil {
glog.V(0).Infof("assign volume failure %v: %v", request, err)
log.Infof("assign volume failure %v: %v", request, err)
return err
}
if resp.Error != "" {
@ -98,16 +98,16 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)
glog.V(4).Infof("replicating %s to %s header:%+v", filename, fileUrl, header)
log.Tracef("replicating %s to %s header:%+v", filename, fileUrl, header)
// fetch data as is, regardless whether it is encrypted or not
uploadResult, err, _ := operation.Upload(fileUrl, filename, false, resp.Body, "gzip" == header.Get("Content-Encoding"), header.Get("Content-Type"), nil, auth)
if err != nil {
glog.V(0).Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
log.Infof("upload source data %v to %s: %v", sourceChunk.GetFileIdString(), fileUrl, err)
return "", fmt.Errorf("upload data: %v", err)
}
if uploadResult.Error != "" {
glog.V(0).Infof("upload failure %v to %s: %v", filename, fileUrl, err)
log.Infof("upload failure %v to %s: %v", filename, fileUrl, err)
return "", fmt.Errorf("upload result: %v", uploadResult.Error)
}

View File

@ -9,7 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@ -68,10 +68,10 @@ func (fs *FilerSink) DeleteEntry(key string, isDirectory, deleteIncludeChunks bo
dir, name := util.FullPath(key).DirAndName()
glog.V(4).Infof("delete entry: %v", key)
log.Tracef("delete entry: %v", key)
err := filer_pb.Remove(fs, dir, name, deleteIncludeChunks, true, true, true, signatures)
if err != nil {
glog.V(0).Infof("delete entry %s: %v", key, err)
log.Infof("delete entry %s: %v", key, err)
return fmt.Errorf("delete entry %s: %v", key, err)
}
return nil
@ -88,10 +88,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
Directory: dir,
Name: name,
}
glog.V(1).Infof("lookup: %v", lookupRequest)
log.Debugf("lookup: %v", lookupRequest)
if resp, err := filer_pb.LookupEntry(client, lookupRequest); err == nil {
if filer.ETag(resp.Entry) == filer.ETag(entry) {
glog.V(3).Infof("already replicated %s", key)
log.Tracef("already replicated %s", key)
return nil
}
}
@ -100,10 +100,10 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
if err != nil {
// only warning here since the source chunk may have been deleted already
glog.Warningf("replicate entry chunks %s: %v", key, err)
log.Warnf("replicate entry chunks %s: %v", key, err)
}
glog.V(4).Infof("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
log.Tracef("replicated %s %+v ===> %+v", key, entry.Chunks, replicatedChunks)
request := &filer_pb.CreateEntryRequest{
Directory: dir,
@ -117,9 +117,9 @@ func (fs *FilerSink) CreateEntry(key string, entry *filer_pb.Entry, signatures [
Signatures: signatures,
}
glog.V(3).Infof("create: %v", request)
log.Tracef("create: %v", request)
if err := filer_pb.CreateEntry(client, request); err != nil {
glog.V(0).Infof("create entry %s: %v", key, err)
log.Infof("create entry %s: %v", key, err)
return fmt.Errorf("create entry %s: %v", key, err)
}
@ -140,10 +140,10 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
Name: name,
}
glog.V(4).Infof("lookup entry: %v", request)
log.Tracef("lookup entry: %v", request)
resp, err := filer_pb.LookupEntry(client, request)
if err != nil {
glog.V(0).Infof("lookup %s: %v", key, err)
log.Infof("lookup %s: %v", key, err)
return err
}
@ -156,16 +156,16 @@ func (fs *FilerSink) UpdateEntry(key string, oldEntry *filer_pb.Entry, newParent
return false, fmt.Errorf("lookup %s: %v", key, err)
}
glog.V(4).Infof("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
log.Tracef("oldEntry %+v, newEntry %+v, existingEntry: %+v", oldEntry, newEntry, existingEntry)
if existingEntry.Attributes.Mtime > newEntry.Attributes.Mtime {
// skip if already changed
// this usually happens when the messages are not ordered
glog.V(2).Infof("late updates %s", key)
log.Debugf("late updates %s", key)
} else if filer.ETag(newEntry) == filer.ETag(existingEntry) {
// skip if no change
// this usually happens when retrying the replication
glog.V(3).Infof("already replicated %s", key)
log.Tracef("already replicated %s", key)
} else {
// find out what changed
deletedChunks, newChunks, err := compareChunks(filer.LookupFn(fs), oldEntry, newEntry)

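The two skip branches above encode the replication invariants: an existing entry with a newer mtime means the update events arrived out of order, and an equal ETag means this message is a retry whose work already happened. A distilled sketch, with shouldReplicateUpdate as a hypothetical helper that does not exist in this codebase:

package sink

// shouldReplicateUpdate restates the skip logic from UpdateEntry above.
func shouldReplicateUpdate(existingMtime, newMtime int64, existingETag, newETag string) bool {
	if existingMtime > newMtime {
		// Late update: a newer version is already stored, so applying
		// this out-of-order message would move the entry backwards.
		return false
	}
	if existingETag == newETag {
		// No change: a retried replication that already succeeded.
		return false
	}
	// Something changed: compare chunks and replicate the difference.
	return true
}
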
View File

@ -10,7 +10,7 @@ import (
"google.golang.org/api/option"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@ -57,12 +57,12 @@ func (g *GcsSink) initialize(google_application_credentials, bucketName, dir str
var found bool
google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !found {
glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
}
}
client, err := storage.NewClient(context.Background(), option.WithCredentialsFile(google_application_credentials))
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
log.Fatalf("Failed to create client: %v", err)
}
g.client = client

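This GCS sink and the Google Pub/Sub input later in the diff resolve credentials the same way: an explicit google_application_credentials value wins, otherwise the GOOGLE_APPLICATION_CREDENTIALS environment variable must be set. A sketch of that fallback, with resolveCredentials as a hypothetical helper (the real code calls log.Fatalf instead of returning an error):

package gcssink

import (
	"fmt"
	"os"
)

// resolveCredentials prefers the value from replication.toml and falls
// back to the conventional Google credentials environment variable.
func resolveCredentials(configured string) (string, error) {
	if configured != "" {
		return configured, nil
	}
	if fromEnv, found := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS"); found {
		return fromEnv, nil
	}
	return "", fmt.Errorf("need to specify GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in replication.toml")
}
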
View File

@ -13,7 +13,7 @@ import (
"github.com/aws/aws-sdk-go/service/s3/s3iface"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/replication/sink"
"github.com/chrislusf/seaweedfs/weed/replication/source"
@ -42,10 +42,10 @@ func (s3sink *S3Sink) GetSinkToDirectory() string {
}
func (s3sink *S3Sink) Initialize(configuration util.Configuration, prefix string) error {
glog.V(0).Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
glog.V(0).Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
glog.V(0).Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
glog.V(0).Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
log.Infof("sink.s3.region: %v", configuration.GetString(prefix+"region"))
log.Infof("sink.s3.bucket: %v", configuration.GetString(prefix+"bucket"))
log.Infof("sink.s3.directory: %v", configuration.GetString(prefix+"directory"))
log.Infof("sink.s3.endpoint: %v", configuration.GetString(prefix+"endpoint"))
return s3sink.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),

View File

@ -10,7 +10,7 @@ import (
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/chrislusf/seaweedfs/weed/filer"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -24,9 +24,9 @@ func (s3sink *S3Sink) deleteObject(key string) error {
result, err := s3sink.conn.DeleteObject(input)
if err == nil {
glog.V(0).Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
log.Infof("[%s] delete %s: %v", s3sink.bucket, key, result)
} else {
glog.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
log.Errorf("[%s] delete %s: %v", s3sink.bucket, key, err)
}
return err
@ -43,9 +43,9 @@ func (s3sink *S3Sink) createMultipartUpload(key string, entry *filer_pb.Entry) (
result, err := s3sink.conn.CreateMultipartUpload(input)
if err == nil {
glog.V(0).Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
log.Infof("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, result)
} else {
glog.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
log.Errorf("[%s] createMultipartUpload %s: %v", s3sink.bucket, key, err)
return "", err
}
@ -64,19 +64,19 @@ func (s3sink *S3Sink) abortMultipartUpload(key, uploadId string) error {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case s3.ErrCodeNoSuchUpload:
glog.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error())
log.Errorf("[%s] abortMultipartUpload %s: %v %v", s3sink.bucket, key, s3.ErrCodeNoSuchUpload, aerr.Error())
default:
glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
log.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
}
} else {
// err is not an awserr.Error here, so aerr is nil; print err itself
// rather than calling aerr.Error(), which would panic.
glog.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, aerr.Error())
log.Errorf("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, err)
}
return err
}
glog.V(0).Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result)
log.Infof("[%s] abortMultipartUpload %s: %v", s3sink.bucket, key, result)
return nil
}
@ -94,9 +94,9 @@ func (s3sink *S3Sink) completeMultipartUpload(ctx context.Context, key, uploadId
result, err := s3sink.conn.CompleteMultipartUpload(input)
if err == nil {
glog.V(0).Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
log.Infof("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, result)
} else {
glog.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
log.Errorf("[%s] completeMultipartUpload %s: %v", s3sink.bucket, key, err)
}
return err
@ -108,7 +108,7 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
readSeeker, err := s3sink.buildReadSeeker(chunk)
if err != nil {
glog.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
log.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
return nil, fmt.Errorf("[%s] uploadPart %s %d read: %v", s3sink.bucket, key, partId, err)
}
@ -122,9 +122,9 @@ func (s3sink *S3Sink) uploadPart(key, uploadId string, partId int, chunk *filer.
result, err := s3sink.conn.UploadPart(input)
if err == nil {
glog.V(0).Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
log.Infof("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, result)
} else {
glog.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
log.Errorf("[%s] uploadPart %s %d upload: %v", s3sink.bucket, key, partId, err)
}
part := &s3.CompletedPart{
@ -148,9 +148,9 @@ func (s3sink *S3Sink) uploadPartCopy(key, uploadId string, partId int64, copySou
result, err := s3sink.conn.UploadPartCopy(input)
if err == nil {
glog.V(0).Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result)
log.Infof("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, result)
} else {
glog.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err)
log.Errorf("[%s] uploadPartCopy %s %d: %v", s3sink.bucket, key, partId, err)
}
return err
@ -165,7 +165,7 @@ func (s3sink *S3Sink) buildReadSeeker(chunk *filer.ChunkView) (io.ReadSeeker, er
for _, fileUrl := range fileUrls {
_, err = util.ReadUrl(fileUrl+"?readDeleted=true", nil, false, false, chunk.Offset, int(chunk.Size), buf)
if err != nil {
glog.V(1).Infof("read from %s: %v", fileUrl, err)
log.Debugf("read from %s: %v", fileUrl, err)
} else {
break
}

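buildReadSeeker above, and FilerSource.ReadPart later in the diff, share one retry shape: try each replica URL in turn, demote per-replica failures to debug logs, and stop at the first success. A sketch of that shape, with readFromAnyReplica as a hypothetical helper not present in this diff:

package replication

import (
	"github.com/chrislusf/seaweedfs/weed/util/log"
)

// readFromAnyReplica tries each replica location until one read succeeds;
// if every replica fails, the last error is returned.
func readFromAnyReplica(fileUrls []string, read func(fileUrl string) error) (err error) {
	for _, fileUrl := range fileUrls {
		if err = read(fileUrl); err != nil {
			log.Debugf("read from %s: %v", fileUrl, err)
			continue
		}
		return nil
	}
	return err
}
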
View File

@ -12,7 +12,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/pb"
"github.com/chrislusf/seaweedfs/weed/security"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
)
@ -49,7 +49,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
err = fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
glog.V(4).Infof("read lookup volume id locations: %v", vid)
log.Tracef("read lookup volume id locations: %v", vid)
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
@ -63,14 +63,14 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrls []string, err error)
})
if err != nil {
glog.V(1).Infof("LookupFileId volume id %s: %v", vid, err)
log.Debugf("LookupFileId volume id %s: %v", vid, err)
return nil, fmt.Errorf("LookupFileId volume id %s: %v", vid, err)
}
locations := vid2Locations[vid]
if locations == nil || len(locations.Locations) == 0 {
glog.V(1).Infof("LookupFileId locate volume id %s: %v", vid, err)
log.Debugf("LookupFileId locate volume id %s: %v", vid, err)
return nil, fmt.Errorf("LookupFileId locate volume id %s: %v", vid, err)
}
@ -91,7 +91,7 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
for _, fileUrl := range fileUrls {
filename, header, resp, err = util.DownloadFile(fileUrl)
if err != nil {
glog.V(1).Infof("fail to read from %s: %v", fileUrl, err)
log.Debugf("fail to read from %s: %v", fileUrl, err)
} else {
break
}

View File

@ -8,7 +8,7 @@ import (
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/sqs"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -28,8 +28,8 @@ func (k *AwsSqsInput) GetName() string {
}
func (k *AwsSqsInput) Initialize(configuration util.Configuration, prefix string) error {
glog.V(0).Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
glog.V(0).Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
log.Infof("replication.notification.aws_sqs.region: %v", configuration.GetString(prefix+"region"))
log.Infof("replication.notification.aws_sqs.sqs_queue_name: %v", configuration.GetString(prefix+"sqs_queue_name"))
return k.initialize(
configuration.GetString(prefix+"aws_access_key_id"),
configuration.GetString(prefix+"aws_secret_access_key"),
@ -106,7 +106,7 @@ func (k *AwsSqsInput) ReceiveMessage() (key string, message *filer_pb.EventNotif
})
if err != nil {
glog.V(1).Infof("delete message from sqs %s: %v", k.queueUrl, err)
log.Debugf("delete message from sqs %s: %v", k.queueUrl, err)
}
return

View File

@ -3,7 +3,7 @@ package sub
import (
"context"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -29,7 +29,7 @@ func (k *GoCDKPubSubInput) GetName() string {
func (k *GoCDKPubSubInput) Initialize(configuration util.Configuration, prefix string) error {
subURL := configuration.GetString(prefix + "sub_url")
glog.V(0).Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
log.Infof("notification.gocdk_pub_sub.sub_url: %v", subURL)
sub, err := pubsub.OpenSubscription(context.Background(), subURL)
if err != nil {
return err

View File

@ -6,7 +6,7 @@ import (
"os"
"cloud.google.com/go/pubsub"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/util/log"
"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
"github.com/chrislusf/seaweedfs/weed/util"
"github.com/golang/protobuf/proto"
@ -28,8 +28,8 @@ func (k *GooglePubSubInput) GetName() string {
}
func (k *GooglePubSubInput) Initialize(configuration util.Configuration, prefix string) error {
glog.V(0).Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
glog.V(0).Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
log.Infof("notification.google_pub_sub.project_id: %v", configuration.GetString(prefix+"project_id"))
log.Infof("notification.google_pub_sub.topic: %v", configuration.GetString(prefix+"topic"))
return k.initialize(
configuration.GetString(prefix+"google_application_credentials"),
configuration.GetString(prefix+"project_id"),
@ -45,13 +45,13 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
var found bool
google_application_credentials, found = os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
if !found {
glog.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
log.Fatalf("need to specific GOOGLE_APPLICATION_CREDENTIALS env variable or google_application_credentials in filer.toml")
}
}
client, err := pubsub.NewClient(ctx, projectId, option.WithCredentialsFile(google_application_credentials))
if err != nil {
glog.Fatalf("Failed to create client: %v", err)
log.Fatalf("Failed to create client: %v", err)
}
k.topicName = topicName
@ -60,11 +60,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
if !exists {
topic, err = client.CreateTopic(ctx, topicName)
if err != nil {
glog.Fatalf("Failed to create topic %s: %v", topicName, err)
log.Fatalf("Failed to create topic %s: %v", topicName, err)
}
}
} else {
glog.Fatalf("Failed to check topic %s: %v", topicName, err)
log.Fatalf("Failed to check topic %s: %v", topicName, err)
}
subscriptionName := "seaweedfs_sub"
@ -74,11 +74,11 @@ func (k *GooglePubSubInput) initialize(google_application_credentials, projectId
if !exists {
k.sub, err = client.CreateSubscription(ctx, subscriptionName, pubsub.SubscriptionConfig{Topic: topic})
if err != nil {
glog.Fatalf("Failed to create subscription %s: %v", subscriptionName, err)
log.Fatalf("Failed to create subscription %s: %v", subscriptionName, err)
}
}
} else {
glog.Fatalf("Failed to check subscription %s: %v", topicName, err)
log.Fatalf("Failed to check subscription %s: %v", topicName, err)
}
k.messageChan = make(chan *pubsub.Message, 1)

Some files were not shown because too many files have changed in this diff.