
Merge branch 'master' into refactoring_dat_backend

Chris Lu committed 5b950c735e on 2019-10-29 23:18:41 -07:00
11 changed files with 21 additions and 24 deletions


@@ -1,7 +1,6 @@
 package command

 import (
 	"github.com/seaweedfs/fuse"
 )


@@ -1,7 +1,6 @@
 package command

 import (
 	"github.com/seaweedfs/fuse"
 )


@@ -1,7 +1,6 @@
 package command

 import (
 	"github.com/seaweedfs/fuse"
 )


@@ -57,7 +57,7 @@ func (ms *MasterServer) doDeleteNormalCollection(collectionName string) error {
 	}
 	for _, server := range collection.ListVolumeServers() {
-		err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
+		err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
 			_, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
 				Collection: collectionName,
 			})
@@ -77,7 +77,7 @@ func (ms *MasterServer) doDeleteEcCollection(collectionName string) error {
 	listOfEcServers := ms.Topo.ListEcServersByCollection(collectionName)
 	for _, server := range listOfEcServers {
-		err := operation.WithVolumeServerClient(server, ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
+		err := operation.WithVolumeServerClient(server, ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
 			_, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
 				Collection: collectionName,
 			})
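
Every grpcDialOpiton → grpcDialOption edit in this merge is the same mechanical rename of a misspelled MasterServer field. The touched call sites all thread that grpc.DialOption into the callback-style volume-server client wrapper seen above. A minimal sketch of that pattern, assuming the generated volume_server_pb package and standard grpc-go; the dial-and-close handling here is an illustration, not necessarily SeaweedFS's actual implementation:

package operation

import (
	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
	"google.golang.org/grpc"
)

// WithVolumeServerClient dials a volume server with the supplied
// grpc.DialOption, hands a typed client to fn, and closes the
// connection when fn returns. (Sketch only; the real helper may
// cache or pool connections.)
func WithVolumeServerClient(address string, dialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
	conn, err := grpc.Dial(address, dialOption)
	if err != nil {
		return err
	}
	defer conn.Close()
	return fn(volume_server_pb.NewVolumeServerClient(conn))
}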


@@ -78,7 +78,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
 	}
 	ms.vgLock.Lock()
 	if !ms.Topo.HasWritableVolume(option) {
-		if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil {
+		if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo); err != nil {
 			ms.vgLock.Unlock()
 			return nil, fmt.Errorf("Cannot grow volume group! %v", err)
 		}


@@ -3,9 +3,6 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/shell"
-	"github.com/chrislusf/seaweedfs/weed/wdclient"
-	"google.golang.org/grpc"
 	"net/http"
 	"net/http/httputil"
 	"net/url"
@@ -21,10 +18,13 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/sequence"
+	"github.com/chrislusf/seaweedfs/weed/shell"
 	"github.com/chrislusf/seaweedfs/weed/topology"
 	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"github.com/gorilla/mux"
 	"github.com/spf13/viper"
+	"google.golang.org/grpc"
 )

 type MasterOption struct {
@@ -57,7 +57,7 @@ type MasterServer struct {
 	clientChansLock sync.RWMutex
 	clientChans     map[string]chan *master_pb.VolumeLocation

-	grpcDialOpiton grpc.DialOption
+	grpcDialOption grpc.DialOption

 	MasterClient *wdclient.MasterClient
 }
@@ -83,7 +83,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
 		option:          option,
 		preallocateSize: preallocateSize,
 		clientChans:     make(map[string]chan *master_pb.VolumeLocation),
-		grpcDialOpiton:  grpcDialOption,
+		grpcDialOption:  grpcDialOption,
 		MasterClient:    wdclient.NewMasterClient(context.Background(), grpcDialOption, "master", peers),
 	}
 	ms.bounedLeaderChan = make(chan int, 16)
@@ -112,7 +112,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
 		r.HandleFunc("/{fileId}", ms.redirectHandler)
 	}

-	ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOpiton, ms.option.GarbageThreshold, ms.preallocateSize)
+	ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOption, ms.option.GarbageThreshold, ms.preallocateSize)

 	ms.startAdminScripts()
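
The import churn at the top of this file is goimports housekeeping rather than a functional change: shell, wdclient, and google.golang.org/grpc move out of the head of the block into alphabetical order among the other third-party imports, which is why the same three paths appear as deletions in the first hunk and additions in the second.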


@@ -108,7 +108,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
 		ms.vgLock.Lock()
 		defer ms.vgLock.Unlock()
 		if !ms.Topo.HasWritableVolume(option) {
-			if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil {
+			if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo); err != nil {
 				writeJsonError(w, r, http.StatusInternalServerError,
 					fmt.Errorf("Cannot grow volume group! %v", err))
 				return


@@ -24,7 +24,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R
 		return
 	}
 	for _, server := range collection.ListVolumeServers() {
-		err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
+		err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
 			_, deleteErr := client.DeleteCollection(context.Background(), &volume_server_pb.DeleteCollectionRequest{
 				Collection: collection.Name,
 			})
@@ -57,7 +57,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque
 		}
 	}
 	glog.Infoln("garbageThreshold =", gcThreshold)
-	ms.Topo.Vacuum(ms.grpcDialOpiton, gcThreshold, ms.preallocateSize)
+	ms.Topo.Vacuum(ms.grpcDialOption, gcThreshold, ms.preallocateSize)
 	ms.dirStatusHandler(w, r)
 }
@@ -73,7 +73,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
 		if ms.Topo.FreeSpace() < int64(count*option.ReplicaPlacement.GetCopyCount()) {
 			err = fmt.Errorf("only %d volumes left, not enough for %d", ms.Topo.FreeSpace(), count*option.ReplicaPlacement.GetCopyCount())
 		} else {
-			count, err = ms.vg.GrowByCountAndType(ms.grpcDialOpiton, count, option, ms.Topo)
+			count, err = ms.vg.GrowByCountAndType(ms.grpcDialOption, count, option, ms.Topo)
 		}
 	} else {
 		err = fmt.Errorf("can not parse parameter count %s", r.FormValue("count"))
@@ -119,13 +119,13 @@ func (ms *MasterServer) selfUrl(r *http.Request) string {
 }

 func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) {
 	if ms.Topo.IsLeader() {
-		submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOpiton)
+		submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOption)
 	} else {
 		masterUrl, err := ms.Topo.Leader()
 		if err != nil {
 			writeJsonError(w, r, http.StatusInternalServerError, err)
 		} else {
-			submitForClientHandler(w, r, masterUrl, ms.grpcDialOpiton)
+			submitForClientHandler(w, r, masterUrl, ms.grpcDialOption)
 		}
 	}
 }


@@ -25,7 +25,7 @@ type RaftServer struct {
 	*raft.GrpcServer
 }

-func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr string, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
+func NewRaftServer(grpcDialOption grpc.DialOption, peers []string, serverAddr, dataDir string, topo *topology.Topology, pulseSeconds int) *RaftServer {
 	s := &RaftServer{
 		peers:      peers,
 		serverAddr: serverAddr,
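
The only change to NewRaftServer is stylistic: Go lets consecutive parameters of the same type share a single type name, so serverAddr string, dataDir string collapses to serverAddr, dataDir string. A self-contained illustration with both spellings:

package main

import "fmt"

// verbose and compact declare identical signatures; Go reads
// "serverAddr, dataDir string" as two string parameters.
func verbose(serverAddr string, dataDir string) string { return serverAddr + " " + dataDir }
func compact(serverAddr, dataDir string) string        { return serverAddr + " " + dataDir }

func main() {
	fmt.Println(verbose("localhost:9333", "/data") == compact("localhost:9333", "/data")) // true
}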


@@ -24,7 +24,7 @@ func (v *Volume) garbageLevel() float64 {

 func (v *Volume) Compact(preallocate int64, compactionBytePerSecond int64) error {
-	if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory
+	if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory
 		glog.V(3).Infof("Compacting volume %d ...", v.Id)
 		//no need to lock for copy on write
 		//v.accessLock.Lock()
@@ -47,7 +47,7 @@ func (v *Volume) Compact2() error {
 func (v *Volume) Compact2() error {
-	if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory
+	if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory
 		glog.V(3).Infof("Compact2 volume %d ...", v.Id)
 		v.isCompacting = true
@@ -64,7 +64,7 @@ func (v *Volume) Compact2() error {
 }

 func (v *Volume) CommitCompact() error {
-	if v.MemoryMapMaxSizeMb > 0 { //it makes no sense to compact in memory
+	if v.MemoryMapMaxSizeMb == 0 { //it makes no sense to compact in memory
 		glog.V(0).Infof("Committing volume %d vacuuming...", v.Id)
 		v.isCompacting = true
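
This is the one behavioral fix in the merge. The old guard MemoryMapMaxSizeMb > 0 contradicted its own comment by running compaction only for memory-mapped volumes; with == 0, Compact, Compact2, and CommitCompact proceed only for disk-backed volumes. A minimal standalone sketch of the corrected guard, where Volume is a stand-in type and only the MemoryMapMaxSizeMb field mirrors the real struct:

package main

import "fmt"

// Volume is a stand-in; in SeaweedFS, MemoryMapMaxSizeMb > 0 marks a
// memory-mapped volume, which there is no sense in compacting.
type Volume struct {
	Id                 uint32
	MemoryMapMaxSizeMb uint32
}

// Compact vacuums only disk-backed volumes, matching the corrected guard.
func (v *Volume) Compact() error {
	if v.MemoryMapMaxSizeMb == 0 { // disk-backed: compaction makes sense
		fmt.Printf("compacting volume %d\n", v.Id)
		// ... copy live entries into a fresh data file ...
	}
	return nil // memory-mapped volumes are skipped
}

func main() {
	(&Volume{Id: 1, MemoryMapMaxSizeMb: 0}).Compact()    // compacts
	(&Volume{Id: 2, MemoryMapMaxSizeMb: 1024}).Compact() // no-op
}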


@@ -120,10 +120,10 @@ func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
 func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
 	vid, count, datanodes, err := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl).PickForWrite(count, option)
 	if err != nil {
-		return "", 0, nil, fmt.Errorf("failed to find writable volumes for collectio:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
+		return "", 0, nil, fmt.Errorf("failed to find writable volumes for collection:%s replication:%s ttl:%s error: %v", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String(), err)
 	}
 	if datanodes.Length() == 0 {
-		return "", 0, nil, fmt.Errorf("no writable volumes available for for collectio:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
+		return "", 0, nil, fmt.Errorf("no writable volumes available for collection:%s replication:%s ttl:%s", option.Collection, option.ReplicaPlacement.String(), option.Ttl.String())
 	}
 	fileId, count := t.Sequence.NextFileId(count)
 	return needle.NewFileId(*vid, fileId, rand.Uint32()).String(), count, datanodes.Head(), nil