mirror of https://github.com/chrislusf/seaweedfs
chrislu 2024-02-29 09:38:52 -08:00
parent 2a7028373d
commit 1b4484bf0a
31 changed files with 116 additions and 117 deletions
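Nearly every hunk below is a gofmt-style normalization: spaces added around operators such as != and +, a space inserted before {, and struct fields and literal keys re-aligned into columns. The only other change is in the regenerated protobuf headers (protoc-gen-go v1.32.0 → v1.31.0, protoc v4.25.2 → v4.25.3). As an illustration only (not part of this commit), the standard library's go/format package applies the same normalization gofmt does:

```go
package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Input with the kinds of spacing issues this commit fixes:
	// "!=" without surrounding spaces and "{" glued to the condition.
	src := []byte("package demo\n\nfunc check(err error) bool {\nif err!=nil{\nreturn true\n}\nreturn false\n}\n")

	out, err := format.Source(src) // same normalization gofmt applies
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints "if err != nil {" with canonical spacing
}
```

Running gofmt -w over the tree produces this class of diff.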


@@ -23,16 +23,16 @@ type ReaderCache struct {
type SingleChunkCacher struct {
    completedTimeNew int64
    sync.Mutex
    parent         *ReaderCache
    chunkFileId    string
    data           []byte
    err            error
    cipherKey      []byte
    isGzipped      bool
    chunkSize      int
    shouldCache    bool
    wg             sync.WaitGroup
    cacheStartedCh chan struct{}
}

func NewReaderCache(limit int, chunkCache chunk_cache.ChunkCache, lookupFileIdFn wdclient.LookupFileIdFunctionType) *ReaderCache {


@@ -68,7 +68,7 @@ func (b *MessageQueueBroker) SubscribeMessage(req *mq_pb.SubscribeMessageRequest
    }()

    var startPosition log_buffer.MessagePosition
-   if req.GetInit()!=nil && req.GetInit().GetPartitionOffset() != nil {
+   if req.GetInit() != nil && req.GetInit().GetPartitionOffset() != nil {
        offset := req.GetInit().GetPartitionOffset()
        if offset.StartTsNs != 0 {
            startPosition = log_buffer.NewMessagePosition(offset.StartTsNs, -2)


@@ -26,7 +26,7 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Par
        startTime, stopTime = startTime.UTC(), stopTime.UTC()
-       targetFile := fmt.Sprintf("%s/%s",partitionDir, startTime.Format(topic.TIME_FORMAT))
+       targetFile := fmt.Sprintf("%s/%s", partitionDir, startTime.Format(topic.TIME_FORMAT))
        // TODO append block with more metadata
@@ -50,7 +50,7 @@ func (b *MessageQueueBroker) genLogOnDiskReadFunc(t topic.Topic, partition *mq_p
        return b.MasterClient.LookupFileId(fileId)
    }
-   eachChunkFn := func (buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
+   eachChunkFn := func(buf []byte, eachLogEntryFn log_buffer.EachLogEntryFuncType, starTsNs, stopTsNs int64) (processedTsNs int64, err error) {
        for pos := 0; pos+4 < len(buf); {
            size := util.BytesToUint32(buf[pos : pos+4])
@@ -99,7 +99,7 @@ func (b *MessageQueueBroker) genLogOnDiskReadFunc(t topic.Topic, partition *mq_p
            if chunk.Size == 0 {
                continue
            }
-           if chunk.IsChunkManifest{
+           if chunk.IsChunkManifest {
                glog.Warningf("this should not happen. unexpected chunk manifest in %s/%s", partitionDir, entry.Name)
                return
            }
@@ -145,7 +145,7 @@ func (b *MessageQueueBroker) genLogOnDiskReadFunc(t topic.Topic, partition *mq_p
        if entry.IsDirectory {
            return nil
        }
-       if stopTsNs!=0 && entry.Name > stopTime.UTC().Format(topic.TIME_FORMAT) {
+       if stopTsNs != 0 && entry.Name > stopTime.UTC().Format(topic.TIME_FORMAT) {
            isDone = true
            return nil
        }


@@ -24,14 +24,14 @@ func (b *MessageQueueBroker) appendToFile(targetFile string, data []byte) error
    var offset int64 = 0
    if err == filer_pb.ErrNotFound {
        entry = &filer_pb.Entry{
            Name:        name,
            IsDirectory: false,
            Attributes: &filer_pb.FuseAttributes{
                Crtime:   time.Now().Unix(),
                Mtime:    time.Now().Unix(),
                FileMode: uint32(os.FileMode(0644)),
                Uid:      uint32(os.Getuid()),
                Gid:      uint32(os.Getgid()),
            },
        }
    } else if err != nil {
@@ -45,11 +45,11 @@ func (b *MessageQueueBroker) appendToFile(targetFile string, data []byte) error
    // update the entry
    return b.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
        return filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
            Directory: dir,
            Entry:     entry,
        })
    })
}

func (b *MessageQueueBroker) assignAndUpload(targetFile string, data []byte) (fileId string, uploadResult *operation.UploadResult, err error) {
@@ -63,11 +63,11 @@ func (b *MessageQueueBroker) assignAndUpload(targetFile string, data []byte) (fi
            Collection: "topics",
            // TtlSec:     wfs.option.TtlSec,
            // DiskType:   string(wfs.option.DiskType),
            DataCenter: b.option.DataCenter,
            Path:       targetFile,
        },
        &operation.UploadOption{
            Cipher: b.option.Cipher,
        },
        func(host, fileId string) string {
            fileUrl := fmt.Sprintf("http://%s/%s", host, fileId)


@@ -12,8 +12,8 @@ import (
)

var (
    messageCount   = flag.Int("n", 1000, "message count")
    concurrency    = flag.Int("c", 4, "concurrent publishers")
    partitionCount = flag.Int("p", 6, "partition count")
    namespace      = flag.String("ns", "test", "namespace")


@@ -7,7 +7,6 @@ import (
    "github.com/seaweedfs/seaweedfs/weed/util"
)

func (p *TopicPublisher) Publish(key, value []byte) error {
    hashKey := util.HashToInt32(key) % pub_balancer.MaxPartitionCount
    if hashKey < 0 {


@@ -16,7 +16,7 @@ type PublisherConfiguration struct {
    Topic                     topic.Topic
    CreateTopic               bool
    CreateTopicPartitionCount int32
    Brokers                   []string
}

type PublishClient struct {


@@ -31,10 +31,10 @@ const (
type Balancer struct {
    Brokers cmap.ConcurrentMap[string, *BrokerStats] // key: broker address
    // Collected from all brokers when they connect to the broker leader
    TopicToBrokers cmap.ConcurrentMap[string, *PartitionSlotToBrokerList] // key: topic name

    OnPartitionChange func(topic *mq_pb.Topic, assignments []*mq_pb.BrokerPartitionAssignment)
    OnAddBroker       func(broker string, brokerStats *BrokerStats)
    OnRemoveBroker    func(broker string, brokerStats *BrokerStats)
}

func NewBalancer() *Balancer {


@@ -39,11 +39,11 @@ func (bs *BrokerStats) UpdateStats(stats *mq_pb.BrokerStats) {
    for _, topicPartitionStats := range stats.Stats {
        tps := &TopicPartitionStats{
            TopicPartition: topic.TopicPartition{
                Topic: topic.Topic{Namespace: topicPartitionStats.Topic.Namespace, Name: topicPartitionStats.Topic.Name},
                Partition: topic.Partition{
                    RangeStart: topicPartitionStats.Partition.RangeStart,
                    RangeStop:  topicPartitionStats.Partition.RangeStop,
                    RingSize:   topicPartitionStats.Partition.RingSize,
                    UnixTimeNs: topicPartitionStats.Partition.UnixTimeNs,
                },
            },
@@ -66,11 +66,11 @@ func (bs *BrokerStats) UpdateStats(stats *mq_pb.BrokerStats) {
func (bs *BrokerStats) RegisterAssignment(t *mq_pb.Topic, partition *mq_pb.Partition, isAdd bool) {
    tps := &TopicPartitionStats{
        TopicPartition: topic.TopicPartition{
            Topic: topic.Topic{Namespace: t.Namespace, Name: t.Name},
            Partition: topic.Partition{
                RangeStart: partition.RangeStart,
                RangeStop:  partition.RangeStop,
                RingSize:   partition.RingSize,
                UnixTimeNs: partition.UnixTimeNs,
            },
        },


@@ -16,12 +16,12 @@ type ConsumerGroupInstance struct {
    ResponseChan chan *mq_pb.SubscriberToSubCoordinatorResponse
}

type ConsumerGroup struct {
    topic topic.Topic
    // map a consumer group instance id to a consumer group instance
    ConsumerGroupInstances cmap.ConcurrentMap[string, *ConsumerGroupInstance]
    mapping                *PartitionConsumerMapping
    reBalanceTimer         *time.Timer
    pubBalancer            *pub_balancer.Balancer
}

func NewConsumerGroup(t *mq_pb.Topic, pubBalancer *pub_balancer.Balancer) *ConsumerGroup {
@@ -40,13 +40,13 @@ func NewConsumerGroupInstance(instanceId string) *ConsumerGroupInstance {
    }
}

func (cg *ConsumerGroup) OnAddConsumerGroupInstance(consumerGroupInstance string, topic *mq_pb.Topic) {
-   cg.onConsumerGroupInstanceChange("add consumer instance "+ consumerGroupInstance)
+   cg.onConsumerGroupInstanceChange("add consumer instance " + consumerGroupInstance)
}

func (cg *ConsumerGroup) OnRemoveConsumerGroupInstance(consumerGroupInstance string, topic *mq_pb.Topic) {
-   cg.onConsumerGroupInstanceChange("remove consumer instance "+ consumerGroupInstance)
+   cg.onConsumerGroupInstanceChange("remove consumer instance " + consumerGroupInstance)
}

-func (cg *ConsumerGroup) onConsumerGroupInstanceChange(reason string){
+func (cg *ConsumerGroup) onConsumerGroupInstanceChange(reason string) {
    if cg.reBalanceTimer != nil {
        cg.reBalanceTimer.Stop()
        cg.reBalanceTimer = nil
@@ -107,9 +107,9 @@ func (cg *ConsumerGroup) RebalanceConsumberGroupInstances(knownPartitionSlotToBr
    for i, partitionSlot := range partitionSlots {
        assignedPartitions[i] = &mq_pb.SubscriberToSubCoordinatorResponse_AssignedPartition{
            Partition: &mq_pb.Partition{
                RangeStop:  partitionSlot.RangeStop,
                RangeStart: partitionSlot.RangeStart,
                RingSize:   partitionSlotToBrokerList.RingSize,
                UnixTimeNs: partitionSlot.UnixTimeNs,
            },
            Broker: partitionSlot.Broker,
@@ -126,5 +126,4 @@ func (cg *ConsumerGroup) RebalanceConsumberGroupInstances(knownPartitionSlotToBr
        consumerGroupInstance.ResponseChan <- response
    }
}


@@ -31,7 +31,7 @@ func NewCoordinator(balancer *pub_balancer.Balancer) *Coordinator {
func (c *Coordinator) GetTopicConsumerGroups(topic *mq_pb.Topic, createIfMissing bool) *TopicConsumerGroups {
    topicName := toTopicName(topic)
    tcg, _ := c.TopicSubscribers.Get(topicName)
-   if tcg == nil && createIfMissing{
+   if tcg == nil && createIfMissing {
        tcg = &TopicConsumerGroups{
            ConsumerGroups: cmap.New[*ConsumerGroup](),
        }
@@ -56,14 +56,14 @@ func (c *Coordinator) AddSubscriber(consumerGroup, consumerGroupInstance string,
    cg, _ := tcg.ConsumerGroups.Get(consumerGroup)
    if cg == nil {
        cg = NewConsumerGroup(topic, c.balancer)
-       if !tcg.ConsumerGroups.SetIfAbsent(consumerGroup, cg){
+       if !tcg.ConsumerGroups.SetIfAbsent(consumerGroup, cg) {
            cg, _ = tcg.ConsumerGroups.Get(consumerGroup)
        }
    }
    cgi, _ := cg.ConsumerGroupInstances.Get(consumerGroupInstance)
    if cgi == nil {
        cgi = NewConsumerGroupInstance(consumerGroupInstance)
-       if !cg.ConsumerGroupInstances.SetIfAbsent(consumerGroupInstance, cgi){
+       if !cg.ConsumerGroupInstances.SetIfAbsent(consumerGroupInstance, cgi) {
            cgi, _ = cg.ConsumerGroupInstances.Get(consumerGroupInstance)
        }
    }
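
The two SetIfAbsent calls above are a race-safe get-or-create: construct the value optimistically, try to claim the key, and re-read if another goroutine won. A minimal sketch of the same pattern using the standard library's sync.Map rather than the cmap library the coordinator uses:

```go
package main

import (
	"fmt"
	"sync"
)

type consumerGroup struct{ name string }

func main() {
	var groups sync.Map // maps group name -> *consumerGroup

	// Race-safe get-or-create, analogous to the SetIfAbsent-then-Get
	// sequence in AddSubscriber above: LoadOrStore returns the existing
	// value when another goroutine created the entry first.
	v, loaded := groups.LoadOrStore("orders", &consumerGroup{name: "orders"})
	cg := v.(*consumerGroup)
	fmt.Println(cg.name, loaded) // "orders false" on first creation
}
```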


@@ -88,7 +88,7 @@ func (manager *LocalTopicManager) CollectStats(duration time.Duration) *mq_pb.Br
    manager.topics.IterCb(func(topic string, localTopic *LocalTopic) {
        for _, localPartition := range localTopic.Partitions {
            topicPartition := &TopicPartition{
                Topic:     Topic{Namespace: localTopic.Namespace, Name: localTopic.Name},
                Partition: localPartition.Partition,
            }
            stats.Stats[topicPartition.String()] = &mq_pb.TopicPartitionStats{
@@ -96,7 +96,7 @@ func (manager *LocalTopicManager) CollectStats(duration time.Duration) *mq_pb.Br
                    Namespace: string(localTopic.Namespace),
                    Name:      localTopic.Name,
                },
                Partition:     localPartition.Partition.ToPbPartition(),
                ConsumerCount: localPartition.ConsumerCount,
            }
            // fmt.Printf("collect topic %+v partition %+v\n", topicPartition, localPartition.Partition)


@@ -22,6 +22,7 @@ type LocalPartition struct {
}

var TIME_FORMAT = "2006-01-02-15-04-05"

func NewLocalPartition(partition Partition, isLeader bool, followerBrokers []pb.ServerAddress, logFlushFn log_buffer.LogFlushFuncType, readFromDiskFn log_buffer.LogReadFromDiskFuncType) *LocalPartition {
    return &LocalPartition{
        Partition: partition,
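
TIME_FORMAT is a Go time layout built from the reference time Mon Jan 2 15:04:05 2006, and names produced with it sort lexicographically in chronological order; that is what allows genLogOnDiskReadFunc above to compare entry.Name against stopTime.UTC().Format(topic.TIME_FORMAT) as plain strings. A small sketch:

```go
package main

import (
	"fmt"
	"time"
)

// Same layout string as the TIME_FORMAT in the diff.
const TIME_FORMAT = "2006-01-02-15-04-05"

func main() {
	earlier := time.Date(2024, 2, 29, 9, 38, 52, 0, time.UTC)
	later := earlier.Add(time.Hour)

	a, b := earlier.Format(TIME_FORMAT), later.Format(TIME_FORMAT)
	fmt.Println(a)     // 2024-02-29-09-38-52
	fmt.Println(a < b) // true: string order matches time order
}
```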


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: filer.proto

package filer_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: filer.proto

package filer_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: iam.proto

package iam_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: iam.proto

package iam_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: master.proto

package master_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: master.proto

package master_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: mount.proto

package mount_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: mount.proto

package mount_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: mq.proto

package mq_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: remote.proto

package remote_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: s3.proto

package s3_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: s3.proto

package s3_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.32.0
-// protoc        v4.25.2
+// protoc-gen-go v1.31.0
+// protoc        v4.25.3
// source: volume_server.proto

package volume_server_pb


@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.3.0
-// - protoc             v4.25.2
+// - protoc             v4.25.3
// source: volume_server.proto

package volume_server_pb


@@ -32,12 +32,12 @@ func NewBufferedQueue[T any](chunkSize int) *BufferedQueue[T] {
    // Create an empty chunk to initialize head and tail
    chunk := &ItemChunkNode[T]{items: make([]T, chunkSize), nodeId: 0}
    bq := &BufferedQueue[T]{
        chunkSize: chunkSize,
        head:      chunk,
        tail:      chunk,
        last:      chunk,
        count:     0,
        mutex:     sync.Mutex{},
    }
    bq.waitCond = sync.NewCond(&bq.mutex)
    return bq
@@ -87,7 +87,7 @@ func (q *BufferedQueue[T]) Dequeue() (T, bool) {
    q.mutex.Lock()
    defer q.mutex.Unlock()
    for q.count <= 0 && !q.isClosed {
        q.waitCond.Wait()
    }
    if q.count <= 0 && q.isClosed {
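
The Dequeue hunk shows the standard condition-variable idiom: wait in a loop while the queue is empty and not closed, then distinguish "woken with data" from "closed and drained". A self-contained sketch of that idiom (the queue type here is hypothetical, not the BufferedQueue API):

```go
package main

import (
	"fmt"
	"sync"
)

type queue[T any] struct {
	mu       sync.Mutex
	cond     *sync.Cond
	items    []T
	isClosed bool
}

func newQueue[T any]() *queue[T] {
	q := &queue[T]{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *queue[T]) Enqueue(item T) {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.items = append(q.items, item)
	q.cond.Signal() // wake one waiting consumer
}

func (q *queue[T]) Close() {
	q.mu.Lock()
	defer q.mu.Unlock()
	q.isClosed = true
	q.cond.Broadcast() // wake all waiters so they can observe isClosed
}

// Dequeue blocks until an item arrives; ok is false once closed and drained.
func (q *queue[T]) Dequeue() (item T, ok bool) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 && !q.isClosed {
		q.cond.Wait() // releases the lock while waiting, reacquires on wake
	}
	if len(q.items) == 0 && q.isClosed {
		return item, false
	}
	item = q.items[0]
	q.items = q.items[1:]
	return item, true
}

func main() {
	q := newQueue[int]()
	go func() {
		q.Enqueue(42)
		q.Close()
	}()
	for v, ok := q.Dequeue(); ok; v, ok = q.Dequeue() {
		fmt.Println(v)
	}
}
```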


@@ -27,39 +27,39 @@ type LogFlushFuncType func(startTime, stopTime time.Time, buf []byte)
type LogReadFromDiskFuncType func(startPosition MessagePosition, stopTsNs int64, eachLogEntryFn EachLogEntryFuncType) (lastReadPosition MessagePosition, isDone bool, err error)

type LogBuffer struct {
    name           string
    prevBuffers    *SealedBuffers
    buf            []byte
    batchIndex     int64
    idx            []int
    pos            int
    startTime      time.Time
    stopTime       time.Time
    lastFlushTime  time.Time
    sizeBuf        []byte
    flushInterval  time.Duration
    flushFn        LogFlushFuncType
    ReadFromDiskFn LogReadFromDiskFuncType
    notifyFn       func()
    isStopping     *atomic.Bool
    flushChan      chan *dataToFlush
    lastTsNs       int64
    sync.RWMutex
}

func NewLogBuffer(name string, flushInterval time.Duration, flushFn LogFlushFuncType,
    readFromDiskFn LogReadFromDiskFuncType, notifyFn func()) *LogBuffer {
    lb := &LogBuffer{
        name:           name,
        prevBuffers:    newSealedBuffers(PreviousBufferCount),
        buf:            make([]byte, BufferSize),
        sizeBuf:        make([]byte, 4),
        flushInterval:  flushInterval,
        flushFn:        flushFn,
        ReadFromDiskFn: readFromDiskFn,
        notifyFn:       notifyFn,
        flushChan:      make(chan *dataToFlush, 256),
        isStopping:     new(atomic.Bool),
    }
    go lb.loopFlush()
    go lb.loopInterval()
@@ -199,10 +199,10 @@ func (logBuffer *LogBuffer) copyToFlush() *dataToFlush {
    return nil
}

-func (logBuffer *LogBuffer) GetEarliestTime() time.Time{
+func (logBuffer *LogBuffer) GetEarliestTime() time.Time {
    return logBuffer.startTime
}
-func (logBuffer *LogBuffer) GetEarliestPosition() MessagePosition{
+func (logBuffer *LogBuffer) GetEarliestPosition() MessagePosition {
    return MessagePosition{
        Time:       logBuffer.startTime,
        BatchIndex: logBuffer.batchIndex,
@@ -241,8 +241,8 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu
    }
    if tsMemory.IsZero() { // case 2.2
        println("2.2 no data")
-       return nil, -2,nil
+       return nil, -2, nil
-   } else if lastReadPosition.Before(tsMemory) && lastReadPosition.BatchIndex +1 < tsBatchIndex { // case 2.3
+   } else if lastReadPosition.Before(tsMemory) && lastReadPosition.BatchIndex+1 < tsBatchIndex { // case 2.3
        if !logBuffer.lastFlushTime.IsZero() {
            glog.V(0).Infof("resume with last flush time: %v", logBuffer.lastFlushTime)
            return nil, -2, ResumeFromDiskError
@@ -273,7 +273,7 @@ func (logBuffer *LogBuffer) ReadFromBuffer(lastReadPosition MessagePosition) (bu
        }
    }
    // glog.V(4).Infof("%s return the current buf %v", m.name, lastReadPosition)
-       return copiedBytes(logBuffer.buf[:logBuffer.pos]), logBuffer.batchIndex,nil
+       return copiedBytes(logBuffer.buf[:logBuffer.pos]), logBuffer.batchIndex, nil
    }

    lastTs := lastReadPosition.UnixNano()


@@ -18,7 +18,7 @@ var (
)

type MessagePosition struct {
    time.Time        // this is the timestamp of the message
    BatchIndex int64 // this is only used when the timestamp is not enough to identify the next message, when the timestamp is in the previous batch.
}
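
MessagePosition pairs an embedded time.Time with a BatchIndex so that two positions with the same timestamp still order correctly, which matches the NewMessagePosition(offset.StartTsNs, -2) call earlier in this diff. A sketch under that assumption (the constructor below is hypothetical, mirroring that call shape):

```go
package main

import (
	"fmt"
	"time"
)

// MessagePosition mirrors the struct in the diff: a timestamp plus a batch
// index used to break ties when the timestamp alone is ambiguous.
type MessagePosition struct {
	time.Time
	BatchIndex int64
}

// newMessagePosition is a hypothetical constructor matching the
// NewMessagePosition(tsNs, batchIndex) call seen earlier in the diff.
func newMessagePosition(tsNs int64, batchIndex int64) MessagePosition {
	return MessagePosition{Time: time.Unix(0, tsNs), BatchIndex: batchIndex}
}

func main() {
	ts := time.Now().UnixNano()
	a := newMessagePosition(ts, 3)
	b := newMessagePosition(ts, 4)
	// Same wall-clock time; BatchIndex distinguishes the two positions.
	fmt.Println(a.Equal(b.Time), a.BatchIndex < b.BatchIndex) // true true
}
```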


@@ -6,10 +6,10 @@ import (
)

type MemBuffer struct {
    buf        []byte
    size       int
    startTime  time.Time
    stopTime   time.Time
    batchIndex int64
}