seaweedfs/weed/util/log_buffer/log_read.go

package log_buffer

import (
	"bytes"
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)
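
// ResumeError and ResumeFromDiskError are sentinel errors used by the read loop
// below: ResumeError marks a batch that ends in a partial entry and should be
// read again from the same position, while ResumeFromDiskError is passed through
// from ReadFromBuffer when the requested position can no longer be served from
// memory and the caller has to resume reading from data flushed to disk.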
var (
	ResumeError         = fmt.Errorf("resume")
	ResumeFromDiskError = fmt.Errorf("resumeFromDisk")
)

type MessagePosition struct {
	time.Time        // timestamp of the message
	BatchIndex int64 // used only when the timestamp alone cannot identify the next message, i.e. when the timestamp falls in the previous batch
}

func NewMessagePosition(tsNs int64, batchIndex int64) MessagePosition {
	return MessagePosition{
		Time:       time.Unix(0, tsNs).UTC(),
		BatchIndex: batchIndex,
	}
}
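
// LoopProcessLogData drives a reader over the in-memory log buffer: it repeatedly
// reads a batch starting at lastReadPosition, decodes the length-prefixed
// filer_pb.LogEntry records in that batch, and hands each entry to eachLogDataFn.
// It returns when eachLogDataFn reports it is done, when stopTsNs (if non-zero)
// is passed, when waitForDataFn declines to wait for more data, or when reading
// has to resume from disk (ResumeFromDiskError).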
func (logBuffer *LogBuffer) LoopProcessLogData(readerName string, startPosition MessagePosition, stopTsNs int64,
	waitForDataFn func() bool, eachLogDataFn EachLogEntryFuncType) (lastReadPosition MessagePosition, isDone bool, err error) {
	// loop through all messages
	var bytesBuf *bytes.Buffer
	var batchIndex int64
	lastReadPosition = startPosition
	var entryCounter int64
	defer func() {
		if bytesBuf != nil {
			logBuffer.ReleaseMemory(bytesBuf)
		}
		println("LoopProcessLogData", readerName, "sent messages total", entryCounter)
	}()
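
	// Each outer iteration releases the previous batch's buffer, fetches the next
	// batch from the in-memory buffer, and decodes the entries it contains.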
	for {

		if bytesBuf != nil {
			logBuffer.ReleaseMemory(bytesBuf)
		}
		bytesBuf, batchIndex, err = logBuffer.ReadFromBuffer(lastReadPosition)
		if err == ResumeFromDiskError {
			time.Sleep(1127 * time.Millisecond)
			return lastReadPosition, isDone, ResumeFromDiskError
		}
		readSize := 0
		if bytesBuf != nil {
			readSize = bytesBuf.Len()
		}
		glog.V(0).Infof("%s ReadFromBuffer at %v batch %d. Read bytes %v batch %d", readerName, lastReadPosition, lastReadPosition.BatchIndex, readSize, batchIndex)
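		// Nothing is available in memory at this position: record the newer batch
		// index if one was reported, then either finish or wait for more data.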
		if bytesBuf == nil {
			if batchIndex >= 0 {
				lastReadPosition = NewMessagePosition(lastReadPosition.UnixNano(), batchIndex)
			}
			if stopTsNs != 0 {
				isDone = true
				return
			}
			// stop the reader if the buffer itself is shutting down
			if logBuffer.IsStopping() {
				isDone = true
				return
			}
			if waitForDataFn() {
				continue
			} else {
				isDone = true
				return
			}
		}

		buf := bytesBuf.Bytes()
		// fmt.Printf("ReadFromBuffer %s by %v size %d\n", readerName, lastReadPosition, len(buf))

		batchSize := 0

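		// Entries are packed back to back in the batch: each record is a 4-byte
		// length prefix followed by a marshaled filer_pb.LogEntry of that length.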
		for pos := 0; pos+4 < len(buf); {

			size := util.BytesToUint32(buf[pos : pos+4])
			if pos+4+int(size) > len(buf) {
				err = ResumeError
				glog.Errorf("LoopProcessLogData: %s read buffer %v read %d entries [%d,%d) from [0,%d)", readerName, lastReadPosition, batchSize, pos, pos+int(size)+4, len(buf))
				return
			}
			entryData := buf[pos+4 : pos+4+int(size)]

			logEntry := &filer_pb.LogEntry{}
			if err = proto.Unmarshal(entryData, logEntry); err != nil {
				glog.Errorf("unexpected unmarshal filer_pb.LogEntry: %v", err)
				pos += 4 + int(size)
				continue
			}
			if stopTsNs != 0 && logEntry.TsNs > stopTsNs {
				isDone = true
				println("stopTsNs", stopTsNs, "logEntry.TsNs", logEntry.TsNs)
				return
			}
			lastReadPosition = NewMessagePosition(logEntry.TsNs, batchIndex)

			if isDone, err = eachLogDataFn(logEntry); err != nil {
				glog.Errorf("LoopProcessLogData: %s process log entry %d %v: %v", readerName, batchSize+1, logEntry, err)
				return
			}
			if isDone {
				glog.V(0).Infof("LoopProcessLogData2: %s process log entry %d", readerName, batchSize+1)
				return
			}

			pos += 4 + int(size)
			batchSize++
			entryCounter++

		}

		glog.V(0).Infof("%s sent messages ts[%+v,%+v] size %d\n", readerName, startPosition, lastReadPosition, batchSize)
	}

}
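
// Illustrative usage sketch (assumptions: the reader name and starting position
// below are placeholders, and eachLogDataFn has the shape used above, i.e.
// func(*filer_pb.LogEntry) (isDone bool, err error)):
//
//	startPosition := NewMessagePosition(time.Now().UnixNano(), 0)
//	lastPosition, isDone, err := logBuffer.LoopProcessLogData("example-reader", startPosition, 0,
//		func() bool { return true }, // keep waiting whenever the buffer runs dry
//		func(logEntry *filer_pb.LogEntry) (bool, error) {
//			fmt.Printf("ts=%d size=%d\n", logEntry.TsNs, len(logEntry.Data))
//			return false, nil // not done; keep consuming entries
//		})
//	_, _, _ = lastPosition, isDone, err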