Chris Lu 2020-05-10 03:50:30 -07:00
parent 6bf3eb69cb
commit 39e72fb23c
13 changed files with 15 additions and 14 deletions

View file

@@ -232,7 +232,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 Reader: &FakeReader{id: uint64(id), size: fileSize, random: random},
 FileSize: fileSize,
 MimeType: "image/bench", // prevent gzip benchmark content
-Fsync: *b.fsync,
+Fsync: *b.fsync,
 }
 ar := &operation.VolumeAssignRequest{
 Count: 1,

View file

@@ -428,7 +428,7 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 uploadError = fmt.Errorf("upload %v to %s result: %v\n", fileName, targetUrl, uploadResult.Error)
 return
 }
-chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i * chunkSize)
+chunksChan <- uploadResult.ToPbFileChunk(assignResult.FileId, i*chunkSize)
 fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
 }(i)

View file

@@ -59,7 +59,7 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
 func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
 return &ChunkReadAt{
-chunkViews: chunkViews,
+chunkViews: chunkViews,
 lookupFileId: LookupFn(filerClient),
 bufferOffset: -1,
 chunkCache: chunkCache,

View file

@@ -103,7 +103,7 @@ func NewChunkStreamReader(filerClient filer_pb.FilerClient, chunks []*filer_pb.F
 chunkViews := ViewFromChunks(chunks, 0, math.MaxInt32)
 return &ChunkStreamReader{
-chunkViews: chunkViews,
+chunkViews: chunkViews,
 lookupFileId: LookupFn(filerClient),
 }
 }

View file

@@ -37,8 +37,7 @@ func (broker *MessageBroker) Subscribe(stream messaging_pb.SeaweedMessaging_Subs
 // IsTransient: true,
 }
-if err = stream.Send(&messaging_pb.BrokerMessage{
-}); err != nil {
+if err = stream.Send(&messaging_pb.BrokerMessage{}); err != nil {
 return err
 }

View file

@@ -1,8 +1,8 @@
 package broker
 import (
-"github.com/cespare/xxhash"
 "github.com/buraksezer/consistent"
+"github.com/cespare/xxhash"
 )
 type Member string
@@ -35,4 +35,4 @@ func PickMember(members []string, key []byte) string {
 m := c.LocateKey(key)
 return m.String()
-}
+}

View file

@@ -18,7 +18,7 @@ func TestPickMember(t *testing.T) {
 total := 1000
 distribution := make(map[string]int)
-for i:=0;i<total;i++{
+for i := 0; i < total; i++ {
 tp := fmt.Sprintf("tp:%2d", i)
 m := PickMember(servers, []byte(tp))
 // println(tp, "=>", m)
@@ -29,4 +29,4 @@ func TestPickMember(t *testing.T) {
 fmt.Printf("member: %s, key count: %d load=%.2f\n", member, count, float64(count*100)/float64(total/len(servers)))
 }
-}
+}
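
The PickMember code touched above (the buraksezer/consistent and cespare/xxhash imports, the Member string type, the c.LocateKey(key) call) and this test are part of the broker's consistent-hash member selection. Below is a standalone sketch of how those fragments plausibly fit together; the consistent.Config values are assumptions for illustration, not necessarily what SeaweedFS uses:

package main

import (
	"fmt"

	"github.com/buraksezer/consistent"
	"github.com/cespare/xxhash"
)

// Member satisfies consistent.Member, which only requires String().
type Member string

func (m Member) String() string { return string(m) }

// hasher adapts xxhash to the consistent.Hasher interface.
type hasher struct{}

func (h hasher) Sum64(data []byte) uint64 { return xxhash.Sum64(data) }

// pickMember maps a key to one of the given members on a consistent hash ring.
func pickMember(members []string, key []byte) string {
	cfg := consistent.Config{
		Hasher:            hasher{},
		PartitionCount:    271,  // assumed value
		ReplicationFactor: 20,   // assumed value
		Load:              1.25, // assumed value
	}
	var ringMembers []consistent.Member
	for _, m := range members {
		ringMembers = append(ringMembers, Member(m))
	}
	ring := consistent.New(ringMembers, cfg)
	return ring.LocateKey(key).String()
}

func main() {
	servers := []string{"s1:8080", "s2:8080", "s3:8080"}
	// The same key always maps to the same member.
	fmt.Println(pickMember(servers, []byte("tp: 7")))
	fmt.Println(pickMember(servers, []byte("tp: 7")))
}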

View file

@@ -16,9 +16,11 @@ type TopicPartition struct {
 Topic string
 Partition int32
 }
+
 const (
 TopicPartitionFmt = "%s/%s_%02d"
 )
+
 func (tp *TopicPartition) String() string {
 return fmt.Sprintf(TopicPartitionFmt, tp.Namespace, tp.Topic, tp.Partition)
 }
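
For reference, TopicPartitionFmt renders a topic partition key as namespace/topic_NN with a two-digit, zero-padded partition number. A tiny standalone example with made-up values:

package main

import "fmt"

// The same pattern as TopicPartitionFmt above.
const topicPartitionFmt = "%s/%s_%02d"

func main() {
	// Made-up namespace, topic, and partition for illustration.
	fmt.Printf(topicPartitionFmt+"\n", "chat", "events", 3) // prints: chat/events_03
}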

View file

@@ -28,7 +28,6 @@ func NewMessagingClient(bootstrapBrokers ...string) *MessagingClient {
 }
 }
-
 func (mc *MessagingClient) findBroker(tp broker.TopicPartition) (*grpc.ClientConn, error) {
 for _, broker := range mc.bootstrapBrokers {

View file

@@ -16,6 +16,7 @@ type Publisher struct {
 messageCount uint64
 publisherId string
 }
+
 /*
 func (mc *MessagingClient) NewPublisher(publisherId, namespace, topic string) (*Publisher, error) {
 // read topic configuration

View file

@@ -5,8 +5,8 @@ import (
 "io"
 "time"
-"google.golang.org/grpc"
 "github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
+"google.golang.org/grpc"
 )
 type Subscriber struct {
type Subscriber struct {

View file

@@ -27,7 +27,7 @@ type FilePart struct {
 Ttl string
 Server string //this comes from assign result
 Fid string //this comes from assign result, but customizable
-Fsync bool
+Fsync bool
 }
 type SubmitResult struct {
type SubmitResult struct {

View file

@@ -59,4 +59,4 @@ func (mb *MemBuffer) locateByTs(lastReadTime time.Time) (pos int) {
 func (mb *MemBuffer) String() string {
 return fmt.Sprintf("[%v,%v] bytes:%d", mb.startTime, mb.stopTime, mb.size)
-}
+}