
read meta logs by timestamp

pass in event ts when moving logs
meta aggregator reads in memory logs only
Chris Lu 2020-08-29 17:37:19 -07:00
parent 063c9ddac5
commit b69cb74c03
6 changed files with 28 additions and 18 deletions
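
The change threads each event's original timestamp through the log buffer instead of stamping every entry with time.Now() at insertion, so logs that are moved between buffers keep their event time. Below is a minimal, self-contained Go sketch (not part of the commit; logBuffer here is a stand-in, not the real log_buffer.LogBuffer) showing the new three-argument AddToBuffer call shape and the zero-means-now convention:

package main

import (
	"fmt"
	"time"
)

// logBuffer is a stand-in type used only to illustrate the new call shape.
type logBuffer struct{}

// AddToBuffer now also takes the event's timestamp in nanoseconds; 0 means
// "stamp the entry with time.Now()".
func (b *logBuffer) AddToBuffer(partitionKey, data []byte, eventTsNs int64) {
	if eventTsNs == 0 {
		eventTsNs = time.Now().UnixNano()
	}
	fmt.Printf("buffered %d bytes with ts %d\n", len(data), eventTsNs)
}

func main() {
	b := &logBuffer{}
	// The filer and meta aggregator forward event.TsNs, and the message broker
	// forwards in.Data.EventTimeNs, so replayed entries keep their original time.
	b.AddToBuffer([]byte("/some/dir"), []byte("meta event"), time.Now().Add(-30*time.Second).UnixNano())
	// Tests (and any caller without a meaningful event time) pass 0.
	b.AddToBuffer(nil, []byte("fresh event"), 0)
}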


@@ -16,7 +16,10 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 )
-const PaginationSize = 1024 * 256
+const (
+	LogFlushInterval = time.Minute
+	PaginationSize = 1024 * 256
+)
 var (
 	OS_UID = uint32(os.Getuid())
@@ -47,7 +50,7 @@ func NewFiler(masters []string, grpcDialOption grpc.DialOption,
 		GrpcDialOption: grpcDialOption,
 		Signature:      util.RandomInt32(),
 	}
-	f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, f.logFlushFunc, notifyFn)
+	f.LocalMetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, f.logFlushFunc, notifyFn)
 	f.metaLogCollection = collection
 	f.metaLogReplication = replication


@@ -68,7 +68,7 @@ func (f *Filer) logMetaEvent(ctx context.Context, fullpath string, eventNotifica
 		return
 	}
-	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data)
+	f.LocalMetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
 }
@@ -119,7 +119,7 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
 		if lastTsNs, err = ReadEachLogEntry(chunkedFileReader, sizeBuf, startTsNs, eachLogEntryFn); err != nil {
 			chunkedFileReader.Close()
 			if err == io.EOF {
-				break
+				continue
 			}
 			return lastTsNs, fmt.Errorf("reading %s: %v", hourMinuteEntry.FullPath, err)
 		}
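
The break to continue change above means that when one persisted log file is exhausted (io.EOF), ReadPersistedLogBuffer moves on to the next file instead of ending the whole scan. A simplified, self-contained sketch of that loop shape follows; the file names and the readOne helper are hypothetical stand-ins for the filer's per-minute log files and ReadEachLogEntry:

package main

import (
	"errors"
	"fmt"
	"io"
)

// readOne stands in for ReadEachLogEntry in this sketch.
func readOne(name string) (lastTsNs int64, err error) {
	if name == "07-59" {
		return 0, io.EOF // an exhausted file should not stop the scan
	}
	return 42, nil
}

func main() {
	var lastTsNs int64
	for _, name := range []string{"07-58", "07-59", "08-00"} {
		ts, err := readOne(name)
		if err != nil {
			if errors.Is(err, io.EOF) {
				continue // skip to the next log file instead of breaking out
			}
			fmt.Println("reading", name, ":", err)
			return
		}
		lastTsNs = ts
	}
	fmt.Println("last timestamp read:", lastTsNs)
}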


@@ -25,13 +25,15 @@ type MetaAggregator struct {
 	ListenersCond *sync.Cond
 }
+// MetaAggregator only aggregates data "on the fly". The logs are not re-persisted to disk.
+// The old data comes from what each LocalMetadata persisted on disk.
 func NewMetaAggregator(filers []string, grpcDialOption grpc.DialOption) *MetaAggregator {
 	t := &MetaAggregator{
 		filers: filers,
 		grpcDialOption: grpcDialOption,
 	}
 	t.ListenersCond = sync.NewCond(&t.ListenersLock)
-	t.MetaLogBuffer = log_buffer.NewLogBuffer(time.Minute, nil, func() {
+	t.MetaLogBuffer = log_buffer.NewLogBuffer(LogFlushInterval, nil, func() {
 		t.ListenersCond.Broadcast()
 	})
 	return t
@@ -48,7 +50,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, filer strin
 	var maybeReplicateMetadataChange func(*filer_pb.SubscribeMetadataResponse)
 	lastPersistTime := time.Now()
 	changesSinceLastPersist := 0
-	lastTsNs := int64(0)
+	lastTsNs := time.Now().Add(-LogFlushInterval).UnixNano()
 	MaxChangeLimit := 100
@@ -88,7 +90,7 @@ func (ma *MetaAggregator) subscribeToOneFiler(f *Filer, self string, filer strin
 		}
 		dir := event.Directory
 		// println("received meta change", dir, "size", len(data))
-		ma.MetaLogBuffer.AddToBuffer([]byte(dir), data)
+		ma.MetaLogBuffer.AddToBuffer([]byte(dir), data, event.TsNs)
 		if maybeReplicateMetadataChange != nil {
 			maybeReplicateMetadataChange(event)
 		}
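
Starting lastTsNs at one LogFlushInterval before now, rather than at 0, makes the aggregator subscribe only to changes recent enough to still sit in the peer filer's in-memory log buffer (which is flushed every LogFlushInterval), matching the "meta aggregator reads in memory logs only" line of the commit message. A small sketch of that start-time arithmetic, assuming only the LogFlushInterval constant introduced in this commit:

package main

import (
	"fmt"
	"time"
)

const LogFlushInterval = time.Minute // as defined in this commit

func main() {
	// Subscribe from one flush interval ago; anything older has already been
	// flushed to disk on the peer and is intentionally not replayed here.
	lastTsNs := time.Now().Add(-LogFlushInterval).UnixNano()
	fmt.Println("subscribe for events newer than", time.Unix(0, lastTsNs).Format(time.RFC3339Nano))
}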


@@ -85,7 +85,7 @@ func (broker *MessageBroker) Publish(stream messaging_pb.SeaweedMessaging_Publis
 			continue
 		}
-		tl.logBuffer.AddToBuffer(in.Data.Key, data)
+		tl.logBuffer.AddToBuffer(in.Data.Key, data, in.Data.EventTimeNs)
 		if in.Data.IsClose {
 			// println("server received closing")


@@ -53,7 +53,7 @@ func NewLogBuffer(flushInterval time.Duration, flushFn func(startTime, stopTime
 	return lb
 }
-func (m *LogBuffer) AddToBuffer(partitionKey, data []byte) {
+func (m *LogBuffer) AddToBuffer(partitionKey, data []byte, eventTsNs int64) {
 	m.Lock()
 	defer func() {
@@ -64,16 +64,21 @@ func (m *LogBuffer) AddToBuffer(partitionKey, data []byte) {
 	}()
 	// need to put the timestamp inside the lock
-	ts := time.Now()
-	tsNs := ts.UnixNano()
-	if m.lastTsNs >= tsNs {
-		// this is unlikely to happen, but just in case
-		tsNs = m.lastTsNs + 1
-		ts = time.Unix(0, tsNs)
+	var ts time.Time
+	if eventTsNs == 0 {
+		ts = time.Now()
+		eventTsNs = ts.UnixNano()
+	} else {
+		ts = time.Unix(0, eventTsNs)
 	}
-	m.lastTsNs = tsNs
+	if m.lastTsNs >= eventTsNs {
+		// this is unlikely to happen, but just in case
+		eventTsNs = m.lastTsNs + 1
+		ts = time.Unix(0, eventTsNs)
+	}
+	m.lastTsNs = eventTsNs
 	logEntry := &filer_pb.LogEntry{
-		TsNs: tsNs,
+		TsNs: eventTsNs,
 		PartitionKeyHash: util.HashToInt32(partitionKey),
 		Data: data,
 	}
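
The reworked block above keeps TsNs values strictly increasing inside a buffer even when callers supply their own event times. A self-contained sketch of that normalization follows; sketchBuffer and stamp are stand-ins for the relevant parts of LogBuffer.AddToBuffer, not the real type:

package main

import (
	"fmt"
	"time"
)

// sketchBuffer stands in for the lastTsNs bookkeeping inside LogBuffer.
type sketchBuffer struct {
	lastTsNs int64
}

// stamp mirrors the new logic: 0 means "use time.Now()", and a timestamp that
// is not newer than the previous one is bumped by 1ns so entries stay ordered.
func (b *sketchBuffer) stamp(eventTsNs int64) (time.Time, int64) {
	var ts time.Time
	if eventTsNs == 0 {
		ts = time.Now()
		eventTsNs = ts.UnixNano()
	} else {
		ts = time.Unix(0, eventTsNs)
	}
	if b.lastTsNs >= eventTsNs {
		// unlikely, but keeps timestamps strictly increasing
		eventTsNs = b.lastTsNs + 1
		ts = time.Unix(0, eventTsNs)
	}
	b.lastTsNs = eventTsNs
	return ts, eventTsNs
}

func main() {
	b := &sketchBuffer{}
	_, first := b.stamp(1000)  // caller-supplied event time is kept
	_, second := b.stamp(1000) // duplicate is bumped to 1001
	_, third := b.stamp(0)     // zero falls back to time.Now()
	fmt.Println(first, second, third)
}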


@@ -23,7 +23,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
 	var buf = make([]byte, messageSize)
 	for i := 0; i < messageCount; i++ {
 		rand.Read(buf)
-		lb.AddToBuffer(nil, buf)
+		lb.AddToBuffer(nil, buf, 0)
 	}
 	receivedmessageCount := 0