
adding grpc mutual tls

Chris Lu 2019-02-18 12:11:52 -08:00
parent 55761ae806
commit 77b9af531d
53 changed files with 382 additions and 188 deletions
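The pattern repeated across the commands below: each process loads security.toml, builds a grpc.DialOption (client side) or grpc.ServerOption (server side) from the [grpc] section, and passes it explicitly into every gRPC call. A minimal client-side sketch of that wiring, based on the functions shown in the diffs; the master address is an arbitrary example:

package main

import (
	"log"

	"github.com/chrislusf/seaweedfs/weed/operation"
	"github.com/chrislusf/seaweedfs/weed/security"
	weed_server "github.com/chrislusf/seaweedfs/weed/server"
	"github.com/spf13/viper"
)

func main() {
	// read security.toml so viper.Sub("grpc") carries the ca/cert/key paths
	weed_server.LoadConfiguration("security", false)

	// falls back to grpc.WithInsecure() when no [grpc] section is configured
	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")

	// master/volume gRPC helpers now take the dial option as an argument
	assignResult, err := operation.Assign("localhost:9333", grpcDialOption,
		&operation.VolumeAssignRequest{Count: 1})
	if err != nil {
		log.Fatalf("assign: %v", err)
	}
	log.Printf("assigned fid %s on %s", assignResult.Fid, assignResult.Url)
}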

@@ -4,6 +4,9 @@ import (
 	"bytes"
 	"flag"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
 	"log"
 	"math/rand"
@@ -19,8 +22,11 @@ var (
 func main() {
 	flag.Parse()
+	weed_server.LoadConfiguration("security", false)
+	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
 	for i := 0; i < *repeat; i++ {
-		assignResult, err := operation.Assign(*master, &operation.VolumeAssignRequest{Count: 1})
+		assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{Count: 1})
 		if err != nil {
 			log.Fatalf("assign: %v", err)
 		}

@@ -2,6 +2,9 @@ package command
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
 	"github.com/chrislusf/seaweedfs/weed/operation"
 	"github.com/chrislusf/seaweedfs/weed/storage"
@@ -46,6 +49,10 @@ var cmdBackup = &Command{
 }
 func runBackup(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
+	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
 	if *s.volumeId == -1 {
 		return false
 	}
@@ -59,7 +66,7 @@ func runBackup(cmd *Command, args []string) bool {
 	}
 	volumeServer := lookup.Locations[0].Url
-	stats, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid))
+	stats, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
 	if err != nil {
 		fmt.Printf("Error get volume %d status: %v\n", vid, err)
 		return true
@@ -81,7 +88,7 @@ func runBackup(cmd *Command, args []string) bool {
 		return true
 	}
-	if err := v.Synchronize(volumeServer); err != nil {
+	if err := v.Synchronize(volumeServer, grpcDialOption); err != nil {
 		fmt.Printf("Error synchronizing volume %d: %v\n", vid, err)
 		return true
 	}

@@ -4,6 +4,9 @@ import (
 	"bufio"
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
+	"google.golang.org/grpc"
 	"io"
 	"math"
 	"math/rand"
@@ -35,6 +38,7 @@ type BenchmarkOptions struct {
 	collection     *string
 	cpuprofile     *string
 	maxCpu         *int
+	grpcDialOption grpc.DialOption
 }
 var (
@@ -101,6 +105,10 @@ var (
 )
 func runBenchmark(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
+	b.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
 	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
 	if *b.maxCpu < 1 {
 		*b.maxCpu = runtime.NumCPU()
@@ -115,7 +123,7 @@ func runBenchmark(cmd *Command, args []string) bool {
 		defer pprof.StopCPUProfile()
 	}
-	masterClient = wdclient.NewMasterClient(context.Background(), "benchmark", strings.Split(*b.masters, ","))
+	masterClient = wdclient.NewMasterClient(context.Background(), b.grpcDialOption, "client", strings.Split(*b.masters, ","))
 	go masterClient.KeepConnectedToMaster()
 	masterClient.WaitUntilConnected()
@@ -223,12 +231,12 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
 			Count:      1,
 			Collection: *b.collection,
 		}
-		if assignResult, err := operation.Assign(masterClient.GetMaster(), ar); err == nil {
+		if assignResult, err := operation.Assign(masterClient.GetMaster(), b.grpcDialOption, ar); err == nil {
 			fp.Server, fp.Fid, fp.Collection = assignResult.Url, assignResult.Fid, *b.collection
 			if !isSecure && assignResult.Auth != "" {
 				isSecure = true
 			}
-			if _, err := fp.Upload(0, masterClient.GetMaster(), assignResult.Auth); err == nil {
+			if _, err := fp.Upload(0, masterClient.GetMaster(), assignResult.Auth, b.grpcDialOption); err == nil {
 				if random.Intn(100) < *b.deletePercentage {
 					s.total++
 					delayedDeleteChan <- &delayedFile{time.Now().Add(time.Second), fp}

@@ -1,6 +1,8 @@
 package command
 import (
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/spf13/viper"
 	"net/http"
 	"strconv"
 	"strings"
@@ -75,6 +77,8 @@ var cmdFiler = &Command{
 func runFiler(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	f.startFiler()
 	return true
@@ -141,7 +145,7 @@ func (fo *FilerOptions) startFiler() {
 		if err != nil {
 			glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
 		}
-		grpcS := util.NewGrpcServer()
+		grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "filer"))
 		filer_pb.RegisterSeaweedFilerServer(grpcS, fs)
 		reflection.Register(grpcS)
 		go grpcS.Serve(grpcL)

@@ -2,6 +2,10 @@ package command
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
+	"google.golang.org/grpc"
 	"io/ioutil"
 	"net/url"
 	"os"
@@ -23,13 +27,14 @@ var (
 )
 type CopyOptions struct {
 	filerGrpcPort  *int
 	master         *string
 	include        *string
 	replication    *string
 	collection     *string
 	ttl            *string
 	maxMB          *int
+	grpcDialOption grpc.DialOption
 }
 func init() {
@@ -61,6 +66,9 @@ var cmdCopy = &Command{
 }
 func runCopy(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	if len(args) <= 1 {
 		return false
 	}
@@ -95,16 +103,17 @@ func runCopy(cmd *Command, args []string) bool {
 	}
 	filerGrpcAddress := fmt.Sprintf("%s:%d", filerUrl.Hostname(), filerGrpcPort)
+	copy.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
 	for _, fileOrDir := range fileOrDirs {
-		if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, urlPath) {
+		if !doEachCopy(fileOrDir, filerUrl.Host, filerGrpcAddress, copy.grpcDialOption, urlPath) {
 			return false
 		}
 	}
 	return true
 }
-func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path string) bool {
+func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, path string) bool {
 	f, err := os.Open(fileOrDir)
 	if err != nil {
 		fmt.Printf("Failed to open file %s: %v\n", fileOrDir, err)
@@ -122,7 +131,7 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path st
 	if mode.IsDir() {
 		files, _ := ioutil.ReadDir(fileOrDir)
 		for _, subFileOrDir := range files {
-			if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, path+fi.Name()+"/") {
+			if !doEachCopy(fileOrDir+"/"+subFileOrDir.Name(), filerAddress, filerGrpcAddress, grpcDialOption, path+fi.Name()+"/") {
 				return false
 			}
 		}
@@ -144,13 +153,13 @@ func doEachCopy(fileOrDir string, filerAddress, filerGrpcAddress string, path st
 	}
 	if chunkCount == 1 {
-		return uploadFileAsOne(filerAddress, filerGrpcAddress, path, f, fi)
+		return uploadFileAsOne(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi)
 	}
-	return uploadFileInChunks(filerAddress, filerGrpcAddress, path, f, fi, chunkCount, chunkSize)
+	return uploadFileInChunks(filerAddress, filerGrpcAddress, grpcDialOption, path, f, fi, chunkCount, chunkSize)
 }
-func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo) bool {
+func uploadFileAsOne(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo) bool {
 	// upload the file content
 	fileName := filepath.Base(f.Name())
@@ -161,7 +170,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f
 	if fi.Size() > 0 {
 		// assign a volume
-		assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
+		assignResult, err := operation.Assign(*copy.master, grpcDialOption, &operation.VolumeAssignRequest{
 			Count:       1,
 			Replication: *copy.replication,
 			Collection:  *copy.collection,
@@ -195,7 +204,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f
 		fmt.Printf("copied %s => http://%s%s%s\n", fileName, filerAddress, urlFolder, fileName)
 	}
-	if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error {
+	if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: urlFolder,
 			Entry: &filer_pb.Entry{
@@ -228,7 +237,7 @@ func uploadFileAsOne(filerAddress, filerGrpcAddress string, urlFolder string, f
 	return true
 }
-func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
+func uploadFileInChunks(filerAddress, filerGrpcAddress string, grpcDialOption grpc.DialOption, urlFolder string, f *os.File, fi os.FileInfo, chunkCount int, chunkSize int64) bool {
 	fileName := filepath.Base(f.Name())
 	mimeType := detectMimeType(f)
@@ -238,7 +247,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string,
 	for i := int64(0); i < int64(chunkCount); i++ {
 		// assign a volume
-		assignResult, err := operation.Assign(*copy.master, &operation.VolumeAssignRequest{
+		assignResult, err := operation.Assign(*copy.master, grpcDialOption, &operation.VolumeAssignRequest{
 			Count:       1,
 			Replication: *copy.replication,
 			Collection:  *copy.collection,
@@ -272,7 +281,7 @@ func uploadFileInChunks(filerAddress, filerGrpcAddress string, urlFolder string,
 		fmt.Printf("uploaded %s-%d to %s [%d,%d)\n", fileName, i+1, targetUrl, i*chunkSize, i*chunkSize+int64(uploadResult.Size))
 	}
-	if err := withFilerClient(filerGrpcAddress, func(client filer_pb.SeaweedFilerClient) error {
+	if err := withFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		request := &filer_pb.CreateEntryRequest{
 			Directory: urlFolder,
 			Entry: &filer_pb.Entry{
@@ -323,9 +332,9 @@ func detectMimeType(f *os.File) string {
 	return mimeType
 }
-func withFilerClient(filerAddress string, fn func(filer_pb.SeaweedFilerClient) error) error {
-	grpcConnection, err := util.GrpcDial(filerAddress)
+func withFilerClient(filerAddress string, grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
+	grpcConnection, err := util.GrpcDial(filerAddress, grpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", filerAddress, err)
 	}

@@ -35,6 +35,7 @@ var cmdFilerReplicate = &Command{
 func runFilerReplicate(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	weed_server.LoadConfiguration("replication", true)
 	weed_server.LoadConfiguration("notification", true)
 	config := viper.GetViper()

@@ -1,6 +1,8 @@
 package command
 import (
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/spf13/viper"
 	"net/http"
 	"os"
 	"runtime"
@@ -54,6 +56,9 @@ var (
 )
 func runMaster(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	if *mMaxCpu < 1 {
 		*mMaxCpu = runtime.NumCPU()
 	}
@@ -104,7 +109,7 @@ func runMaster(cmd *Command, args []string) bool {
 		glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
 	}
 	// Create your protocol servers.
-	grpcS := util.NewGrpcServer()
+	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))
 	master_pb.RegisterSeaweedServer(grpcS, ms)
 	reflection.Register(grpcS)

@@ -4,6 +4,9 @@ package command
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
 	"os"
 	"os/user"
 	"runtime"
@@ -19,6 +22,9 @@ import (
 )
 func runMount(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	fmt.Printf("This is SeaweedFS version %s %s %s\n", util.VERSION, runtime.GOOS, runtime.GOARCH)
 	if *mountOptions.dir == "" {
 		fmt.Printf("Please specify the mount directory via \"-dir\"")
@@ -91,6 +97,7 @@ func runMount(cmd *Command, args []string) bool {
 	err = fs.Serve(c, filesys.NewSeaweedFileSystem(&filesys.Option{
 		FilerGrpcAddress:   filerGrpcAddress,
+		GrpcDialOption:     security.LoadClientTLS(viper.Sub("grpc"), "client"),
 		FilerMountRootPath: mountRoot,
 		Collection:         *mountOptions.collection,
 		Replication:        *mountOptions.replication,

@@ -1,6 +1,9 @@
 package command
 import (
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
 	"net/http"
 	"time"
@@ -46,6 +49,8 @@ var cmdS3 = &Command{
 func runS3(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	filerGrpcAddress, err := parseFilerGrpcAddress(*s3options.filer, *s3options.filerGrpcPort)
 	if err != nil {
 		glog.Fatal(err)
@@ -59,6 +64,7 @@ func runS3(cmd *Command, args []string) bool {
 		FilerGrpcAddress: filerGrpcAddress,
 		DomainName:       *s3options.domainName,
 		BucketsPath:      *s3options.filerBucketsPath,
+		GrpcDialOption:   security.LoadClientTLS(viper.Sub("grpc"), "client"),
 	})
 	if s3ApiServer_err != nil {
 		glog.Fatalf("S3 API Server startup error: %v", s3ApiServer_err)

@@ -250,8 +250,34 @@ directory = "/"    # destination directory
 # /etc/seaweedfs/security.toml
 # this file is read by master, volume server, and filer
+# the jwt signing key is read by master and volume server
+# a jwt expires in 10 seconds
 [jwt.signing]
 key = ""
+
+# volume server also uses grpc that should be secured.
+# all grpc tls authentications are mutual
+[grpc]
+ca = ""
+
+[grpc.volume]
+cert = ""
+key = ""
+
+[grpc.master]
+cert = ""
+key = ""
+
+[grpc.filer]
+cert = ""
+key = ""
+
+# use this for any place needs a grpc client
+# i.e., "weed backup|benchmark|filer.copy|filer.replicate|mount|s3|upload"
+[grpc.client]
+cert = ""
+key = ""
 `
 )
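For reference, a filled-in /etc/seaweedfs/security.toml might look like the following; the certificate paths are hypothetical and would point at one CA plus a cert/key pair per component, all issued by the same CA since every gRPC connection is verified mutually:

[grpc]
ca = "/etc/seaweedfs/certs/ca.crt"

[grpc.volume]
cert = "/etc/seaweedfs/certs/volume.crt"
key = "/etc/seaweedfs/certs/volume.key"

[grpc.master]
cert = "/etc/seaweedfs/certs/master.crt"
key = "/etc/seaweedfs/certs/master.key"

[grpc.filer]
cert = "/etc/seaweedfs/certs/filer.crt"
key = "/etc/seaweedfs/certs/filer.key"

[grpc.client]
cert = "/etc/seaweedfs/certs/client.crt"
key = "/etc/seaweedfs/certs/client.key"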

@@ -1,6 +1,8 @@
 package command
 import (
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/spf13/viper"
 	"net/http"
 	"os"
 	"runtime"
@@ -95,6 +97,9 @@ func init() {
 }
 func runServer(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	if *serverOptions.cpuprofile != "" {
 		f, err := os.Create(*serverOptions.cpuprofile)
 		if err != nil {
@@ -188,7 +193,8 @@ func runServer(cmd *Command, args []string) bool {
 		glog.Fatalf("master failed to listen on grpc port %d: %v", grpcPort, err)
 	}
 	// Create your protocol servers.
-	grpcS := util.NewGrpcServer()
+	glog.V(0).Infof("grpc config %+v", viper.Sub("grpc"))
+	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "master"))
 	master_pb.RegisterSeaweedServer(grpcS, ms)
 	reflection.Register(grpcS)

@@ -3,6 +3,9 @@ package command
 import (
 	"encoding/json"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/spf13/viper"
 	"os"
 	"path/filepath"
@@ -57,6 +60,10 @@ var cmdUpload = &Command{
 }
 func runUpload(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
+	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")
 	if len(args) == 0 {
 		if *upload.dir == "" {
 			return false
@@ -73,7 +80,7 @@ func runUpload(cmd *Command, args []string) bool {
 			if e != nil {
 				return e
 			}
-			results, e := operation.SubmitFiles(*upload.master, parts,
+			results, e := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
 				*upload.replication, *upload.collection, *upload.dataCenter,
 				*upload.ttl, *upload.maxMB)
 			bytes, _ := json.Marshal(results)
@@ -92,7 +99,7 @@ func runUpload(cmd *Command, args []string) bool {
 		if e != nil {
 			fmt.Println(e.Error())
 		}
-		results, _ := operation.SubmitFiles(*upload.master, parts,
+		results, _ := operation.SubmitFiles(*upload.master, grpcDialOption, parts,
 			*upload.replication, *upload.collection, *upload.dataCenter,
 			*upload.ttl, *upload.maxMB)
 		bytes, _ := json.Marshal(results)

@@ -1,6 +1,8 @@
 package command
 import (
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/spf13/viper"
 	"net/http"
 	"os"
 	"runtime"
@@ -78,6 +80,9 @@ var (
 )
 func runVolume(cmd *Command, args []string) bool {
+	weed_server.LoadConfiguration("security", false)
 	if *v.maxCpu < 1 {
 		*v.maxCpu = runtime.NumCPU()
 	}
@@ -185,7 +190,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 	if err != nil {
 		glog.Fatalf("failed to listen on grpc port %d: %v", grpcPort, err)
 	}
-	grpcS := util.NewGrpcServer()
+	grpcS := util.NewGrpcServer(security.LoadServerTLS(viper.Sub("grpc"), "volume"))
 	volume_server_pb.RegisterVolumeServerServer(grpcS, volumeServer)
 	reflection.Register(grpcS)
 	go grpcS.Serve(grpcL)

@@ -3,6 +3,7 @@ package filer2
 import (
 	"context"
 	"fmt"
+	"google.golang.org/grpc"
 	"math"
 	"os"
 	"path/filepath"
@@ -24,13 +25,15 @@ type Filer struct {
 	directoryCache     *ccache.Cache
 	MasterClient       *wdclient.MasterClient
 	fileIdDeletionChan chan string
+	GrpcDialOption     grpc.DialOption
 }
-func NewFiler(masters []string) *Filer {
+func NewFiler(masters []string, grpcDialOption grpc.DialOption) *Filer {
 	f := &Filer{
 		directoryCache:     ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100)),
-		MasterClient:       wdclient.NewMasterClient(context.Background(), "filer", masters),
+		MasterClient:       wdclient.NewMasterClient(context.Background(), grpcDialOption, "filer", masters),
 		fileIdDeletionChan: make(chan string, 4096),
+		GrpcDialOption:     grpcDialOption,
 	}
 	go f.loopProcessingDeletion()

@@ -38,13 +38,13 @@ func (f *Filer) loopProcessingDeletion() {
 			fileIds = append(fileIds, fid)
 			if len(fileIds) >= 4096 {
 				glog.V(1).Infof("deleting fileIds len=%d", len(fileIds))
-				operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
+				operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
 				fileIds = fileIds[:0]
 			}
 		case <-ticker.C:
 			if len(fileIds) > 0 {
 				glog.V(1).Infof("timed deletion fileIds len=%d", len(fileIds))
-				operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
+				operation.DeleteFilesWithLookupVolumeId(f.GrpcDialOption, fileIds, lookupFunc)
 				fileIds = fileIds[:0]
 			}
 		}

@@ -8,7 +8,7 @@ import (
 )
 func TestCreateAndFind(t *testing.T) {
-	filer := filer2.NewFiler(nil)
+	filer := filer2.NewFiler(nil, nil)
 	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}
@@ -61,7 +61,7 @@ func TestCreateAndFind(t *testing.T) {
 }
 func TestEmptyRoot(t *testing.T) {
-	filer := filer2.NewFiler(nil)
+	filer := filer2.NewFiler(nil, nil)
 	dir, _ := ioutil.TempDir("", "seaweedfs_filer_test2")
 	defer os.RemoveAll(dir)
 	store := &LevelDBStore{}

@@ -6,7 +6,7 @@ import (
 )
 func TestCreateAndFind(t *testing.T) {
-	filer := filer2.NewFiler(nil)
+	filer := filer2.NewFiler(nil, nil)
 	store := &MemDbStore{}
 	store.Initialize(nil)
 	filer.SetStore(store)
@@ -43,7 +43,7 @@ func TestCreateAndFind(t *testing.T) {
 }
 func TestCreateFileAndList(t *testing.T) {
-	filer := filer2.NewFiler(nil)
+	filer := filer2.NewFiler(nil, nil)
 	store := &MemDbStore{}
 	store.Initialize(nil)
 	filer.SetStore(store)

@@ -10,6 +10,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 	"github.com/seaweedfs/fuse"
 	"github.com/seaweedfs/fuse/fs"
+	"google.golang.org/grpc"
 	"net/http"
 	"strings"
 	"sync"
@@ -230,7 +231,7 @@ func (fh *FileHandle) Flush(ctx context.Context, req *fuse.FlushRequest) error {
 	})
 }
-func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, fileIds []string) error {
+func deleteFileIds(ctx context.Context, grpcDialOption grpc.DialOption, client filer_pb.SeaweedFilerClient, fileIds []string) error {
 	var vids []string
 	for _, fileId := range fileIds {
@@ -267,7 +268,7 @@ func deleteFileIds(ctx context.Context, client filer_pb.SeaweedFilerClient, file
 		return m, err
 	}
-	_, err := operation.DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
+	_, err := operation.DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
 	return err
 }

@@ -19,6 +19,7 @@ import (
 type Option struct {
 	FilerGrpcAddress   string
+	GrpcDialOption     grpc.DialOption
 	FilerMountRootPath string
 	Collection         string
 	Replication        string
@@ -77,7 +78,7 @@ func (wfs *WFS) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) erro
 	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
 		client := filer_pb.NewSeaweedFilerClient(grpcConnection)
 		return fn(client)
-	}, wfs.option.FilerGrpcAddress)
+	}, wfs.option.FilerGrpcAddress, wfs.option.GrpcDialOption)
 }

@@ -16,7 +16,7 @@ func (wfs *WFS) deleteFileChunks(chunks []*filer_pb.FileChunk) {
 	}
 	wfs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
-		deleteFileIds(context.Background(), client, fileIds)
+		deleteFileIds(context.Background(), wfs.option.GrpcDialOption, client, fileIds)
 		return nil
 	})
 }

@@ -3,6 +3,7 @@ package operation
 import (
 	"context"
 	"fmt"
+	"google.golang.org/grpc"
 	"strings"
 	"time"
@@ -30,7 +31,7 @@ type AssignResult struct {
 	Auth security.EncodedJwt `json:"auth,omitempty"`
 }
-func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
+func Assign(server string, grpcDialOption grpc.DialOption, primaryRequest *VolumeAssignRequest, alternativeRequests ...*VolumeAssignRequest) (*AssignResult, error) {
 	var requests []*VolumeAssignRequest
 	requests = append(requests, primaryRequest)
@@ -44,7 +45,7 @@ func Assign(server string, primaryRequest *VolumeAssignRequest, alternativeReque
 			continue
 		}
-		lastError = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error {
+		lastError = withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
 			ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
 			defer cancel()

@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"google.golang.org/grpc"
 	"io"
 	"net/http"
 	"sort"
@@ -69,12 +70,12 @@ func (cm *ChunkManifest) Marshal() ([]byte, error) {
 	return json.Marshal(cm)
 }
-func (cm *ChunkManifest) DeleteChunks(master string) error {
+func (cm *ChunkManifest) DeleteChunks(master string, grpcDialOption grpc.DialOption) error {
 	var fileIds []string
 	for _, ci := range cm.Chunks {
 		fileIds = append(fileIds, ci.Fid)
 	}
-	results, err := DeleteFiles(master, fileIds)
+	results, err := DeleteFiles(master, grpcDialOption, fileIds)
 	if err != nil {
 		glog.V(0).Infof("delete %+v: %v", fileIds, err)
 		return fmt.Errorf("chunk delete: %v", err)

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"google.golang.org/grpc"
 	"net/http"
 	"strings"
 	"sync"
@@ -28,17 +29,17 @@ func ParseFileId(fid string) (vid string, key_cookie string, err error) {
 }
 // DeleteFiles batch deletes a list of fileIds
-func DeleteFiles(master string, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
+func DeleteFiles(master string, grpcDialOption grpc.DialOption, fileIds []string) ([]*volume_server_pb.DeleteResult, error) {
 	lookupFunc := func(vids []string) (map[string]LookupResult, error) {
-		return LookupVolumeIds(master, vids)
+		return LookupVolumeIds(master, grpcDialOption, vids)
 	}
-	return DeleteFilesWithLookupVolumeId(fileIds, lookupFunc)
+	return DeleteFilesWithLookupVolumeId(grpcDialOption, fileIds, lookupFunc)
 }
-func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
+func DeleteFilesWithLookupVolumeId(grpcDialOption grpc.DialOption, fileIds []string, lookupFunc func(vid []string) (map[string]LookupResult, error)) ([]*volume_server_pb.DeleteResult, error) {
 	var ret []*volume_server_pb.DeleteResult
@@ -92,7 +93,7 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin
 		go func(server string, fidList []string) {
 			defer wg.Done()
-			if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, fidList); deleteErr != nil {
+			if deleteResults, deleteErr := DeleteFilesAtOneVolumeServer(server, grpcDialOption, fidList); deleteErr != nil {
 				err = deleteErr
 			} else {
 				ret = append(ret, deleteResults...)
@@ -106,9 +107,9 @@ func DeleteFilesWithLookupVolumeId(fileIds []string, lookupFunc func(vid []strin
 }
 // DeleteFilesAtOneVolumeServer deletes a list of files that is on one volume server via gRpc
-func DeleteFilesAtOneVolumeServer(volumeServer string, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
-	err = WithVolumeServerClient(volumeServer, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+func DeleteFilesAtOneVolumeServer(volumeServer string, grpcDialOption grpc.DialOption, fileIds []string) (ret []*volume_server_pb.DeleteResult, err error) {
+	err = WithVolumeServerClient(volumeServer, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
 		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
 		defer cancel()

@@ -18,7 +18,7 @@ var (
 	grpcClientsLock sync.Mutex
 )
-func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.VolumeServerClient) error) error {
+func WithVolumeServerClient(volumeServer string, grpcDialOption grpc.DialOption, fn func(volume_server_pb.VolumeServerClient) error) error {
 	grpcAddress, err := toVolumeServerGrpcAddress(volumeServer)
 	if err != nil {
@@ -28,7 +28,7 @@ func WithVolumeServerClient(volumeServer string, fn func(volume_server_pb.Volume
 	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
 		client := volume_server_pb.NewVolumeServerClient(grpcConnection)
 		return fn(client)
-	}, grpcAddress)
+	}, grpcAddress, grpcDialOption)
 }
@@ -42,7 +42,7 @@ func toVolumeServerGrpcAddress(volumeServer string) (grpcAddress string, err err
 	return fmt.Sprintf("%s:%d", volumeServer[0:sepIndex], port+10000), nil
 }
-func withMasterServerClient(masterServer string, fn func(masterClient master_pb.SeaweedClient) error) error {
+func withMasterServerClient(masterServer string, grpcDialOption grpc.DialOption, fn func(masterClient master_pb.SeaweedClient) error) error {
 	masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(masterServer, 0)
 	if parseErr != nil {
@@ -52,6 +52,6 @@ func withMasterServerClient(masterServer string, fn func(masterClient master_pb.
 	return util.WithCachedGrpcClient(func(grpcConnection *grpc.ClientConn) error {
 		client := master_pb.NewSeaweedClient(grpcConnection)
 		return fn(client)
-	}, masterGrpcAddress)
+	}, masterGrpcAddress, grpcDialOption)
 }

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"google.golang.org/grpc"
 	"math/rand"
 	"net/url"
 	"strings"
@@ -78,7 +79,7 @@ func LookupFileId(server string, fileId string) (fullUrl string, err error) {
 }
 // LookupVolumeIds find volume locations by cache and actual lookup
-func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, error) {
+func LookupVolumeIds(server string, grpcDialOption grpc.DialOption, vids []string) (map[string]LookupResult, error) {
 	ret := make(map[string]LookupResult)
 	var unknown_vids []string
@@ -98,7 +99,7 @@ func LookupVolumeIds(server string, vids []string) (map[string]LookupResult, err
 	//only query unknown_vids
-	err := withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error {
+	err := withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
 		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
 		defer cancel()

@@ -2,14 +2,15 @@ package operation
 import (
 	"context"
+	"google.golang.org/grpc"
 	"time"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 )
-func Statistics(server string, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) {
-	err = withMasterServerClient(server, func(masterClient master_pb.SeaweedClient) error {
+func Statistics(server string, grpcDialOption grpc.DialOption, req *master_pb.StatisticsRequest) (resp *master_pb.StatisticsResponse, err error) {
+	err = withMasterServerClient(server, grpcDialOption, func(masterClient master_pb.SeaweedClient) error {
 		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
 		defer cancel()

@@ -2,6 +2,7 @@ package operation
 import (
 	"bytes"
+	"google.golang.org/grpc"
 	"io"
 	"mime"
 	"net/url"
@@ -36,7 +37,7 @@ type SubmitResult struct {
 	Error string `json:"error,omitempty"`
 }
-func SubmitFiles(master string, files []FilePart,
+func SubmitFiles(master string, grpcDialOption grpc.DialOption, files []FilePart,
 	replication string, collection string, dataCenter string, ttl string, maxMB int) ([]SubmitResult, error) {
 	results := make([]SubmitResult, len(files))
 	for index, file := range files {
@@ -49,7 +50,7 @@ func SubmitFiles(master string, files []FilePart,
 		DataCenter:  dataCenter,
 		Ttl:         ttl,
 	}
-	ret, err := Assign(master, ar)
+	ret, err := Assign(master, grpcDialOption, ar)
 	if err != nil {
 		for index, _ := range files {
 			results[index].Error = err.Error()
@@ -65,7 +66,7 @@ func SubmitFiles(master string, files []FilePart,
 		file.Replication = replication
 		file.Collection = collection
 		file.DataCenter = dataCenter
-		results[index].Size, err = file.Upload(maxMB, master, ret.Auth)
+		results[index].Size, err = file.Upload(maxMB, master, ret.Auth, grpcDialOption)
 		if err != nil {
 			results[index].Error = err.Error()
 		}
@@ -108,7 +109,7 @@ func newFilePart(fullPathFilename string) (ret FilePart, err error) {
 	return ret, nil
 }
-func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (retSize uint32, err error) {
+func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt, grpcDialOption grpc.DialOption) (retSize uint32, err error) {
 	fileUrl := "http://" + fi.Server + "/" + fi.Fid
 	if fi.ModTime != 0 {
 		fileUrl += "?ts=" + strconv.Itoa(int(fi.ModTime))
@@ -136,7 +137,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re
 			Collection:  fi.Collection,
 			Ttl:         fi.Ttl,
 		}
-		ret, err = Assign(master, ar)
+		ret, err = Assign(master, grpcDialOption, ar)
 		if err != nil {
 			return
 		}
@@ -149,10 +150,10 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re
 				Collection:  fi.Collection,
 				Ttl:         fi.Ttl,
 			}
-			ret, err = Assign(master, ar)
+			ret, err = Assign(master, grpcDialOption, ar)
 			if err != nil {
 				// delete all uploaded chunks
-				cm.DeleteChunks(master)
+				cm.DeleteChunks(master, grpcDialOption)
 				return
 			}
 			id = ret.Fid
@@ -170,7 +171,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re
 				ret.Auth)
 			if e != nil {
 				// delete all uploaded chunks
-				cm.DeleteChunks(master)
+				cm.DeleteChunks(master, grpcDialOption)
 				return 0, e
 			}
 			cm.Chunks = append(cm.Chunks,
@@ -185,7 +186,7 @@ func (fi FilePart) Upload(maxMB int, master string, jwt security.EncodedJwt) (re
 		err = upload_chunked_file_manifest(fileUrl, &cm, jwt)
 		if err != nil {
 			// delete all uploaded chunks
-			cm.DeleteChunks(master)
+			cm.DeleteChunks(master, grpcDialOption)
 		}
 	} else {
 		ret, e := Upload(fileUrl, baseName, fi.Reader, false, fi.MimeType, nil, jwt)

@@ -3,6 +3,7 @@ package operation
 import (
 	"context"
 	"fmt"
+	"google.golang.org/grpc"
 	"io"
 	"time"
@@ -11,9 +12,9 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
-func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
-	WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error {
+func GetVolumeSyncStatus(server string, grpcDialOption grpc.DialOption, vid uint32) (resp *volume_server_pb.VolumeSyncStatusResponse, err error) {
+	WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
 		ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
 		defer cancel()
@@ -26,9 +27,9 @@ func GetVolumeSyncStatus(server string, vid uint32) (resp *volume_server_pb.Volu
 	return
 }
-func GetVolumeIdxEntries(server string, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error {
-	return WithVolumeServerClient(server, func(client volume_server_pb.VolumeServerClient) error {
+func GetVolumeIdxEntries(server string, grpcDialOption grpc.DialOption, vid uint32, eachEntryFn func(key NeedleId, offset Offset, size uint32)) error {
+	return WithVolumeServerClient(server, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
 		stream, err := client.VolumeSyncIndex(context.Background(), &volume_server_pb.VolumeSyncIndexRequest{
 			VolumdId: vid,
 		})

@@ -105,7 +105,7 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk) (fileId stri
 func (fs *FilerSink) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-	grpcConnection, err := util.GrpcDial(fs.grpcAddress)
+	grpcConnection, err := util.GrpcDial(fs.grpcAddress, fs.grpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
 	}

@@ -3,6 +3,9 @@ package filersink
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/spf13/viper"
+	"google.golang.org/grpc"
 	"github.com/chrislusf/seaweedfs/weed/filer2"
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -13,13 +16,14 @@ import (
 )
 type FilerSink struct {
 	filerSource    *source.FilerSource
 	grpcAddress    string
 	dir            string
 	replication    string
 	collection     string
 	ttlSec         int32
 	dataCenter     string
+	grpcDialOption grpc.DialOption
 }
 func init() {
@@ -55,6 +59,7 @@ func (fs *FilerSink) initialize(grpcAddress string, dir string,
 	fs.replication = replication
 	fs.collection = collection
 	fs.ttlSec = int32(ttlSec)
+	fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
 	return nil
 }

@@ -3,6 +3,9 @@ package source
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/spf13/viper"
+	"google.golang.org/grpc"
 	"io"
 	"net/http"
 	"strings"
@@ -17,8 +20,9 @@ type ReplicationSource interface {
 }
 type FilerSource struct {
 	grpcAddress    string
-	Dir            string
+	grpcDialOption grpc.DialOption
+	Dir            string
 }
 func (fs *FilerSource) Initialize(configuration util.Configuration) error {
@@ -31,6 +35,7 @@ func (fs *FilerSource) Initialize(configuration util.Configuration) error {
 func (fs *FilerSource) initialize(grpcAddress string, dir string) (err error) {
 	fs.grpcAddress = grpcAddress
 	fs.Dir = dir
+	fs.grpcDialOption = security.LoadClientTLS(viper.Sub("grpc"), "client")
 	return nil
 }
@@ -40,7 +45,7 @@ func (fs *FilerSource) LookupFileId(part string) (fileUrl string, err error) {
 	vid := volumeId(part)
-	err = fs.withFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err = fs.withFilerClient(fs.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 		glog.V(4).Infof("read lookup volume id locations: %v", vid)
 		resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
@@ -84,9 +89,9 @@ func (fs *FilerSource) ReadPart(part string) (filename string, header http.Heade
 	return filename, header, readCloser, err
 }
-func (fs *FilerSource) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-	grpcConnection, err := util.GrpcDial(fs.grpcAddress)
+func (fs *FilerSource) withFilerClient(grpcDialOption grpc.DialOption, fn func(filer_pb.SeaweedFilerClient) error) error {
+	grpcConnection, err := util.GrpcDial(fs.grpcAddress, grpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", fs.grpcAddress, err)
 	}

@@ -37,7 +37,7 @@ func encodeResponse(response interface{}) []byte {
 func (s3a *S3ApiServer) withFilerClient(fn func(filer_pb.SeaweedFilerClient) error) error {
-	grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress)
+	grpcConnection, err := util.GrpcDial(s3a.option.FilerGrpcAddress, s3a.option.GrpcDialOption)
 	if err != nil {
 		return fmt.Errorf("fail to dial %s: %v", s3a.option.FilerGrpcAddress, err)
 	}

@@ -8,6 +8,7 @@ import (
 	_ "github.com/chrislusf/seaweedfs/weed/filer2/postgres"
 	_ "github.com/chrislusf/seaweedfs/weed/filer2/redis"
 	"github.com/gorilla/mux"
+	"google.golang.org/grpc"
 	"net/http"
 )
@@ -16,6 +17,7 @@ type S3ApiServerOption struct {
 	FilerGrpcAddress string
 	DomainName       string
 	BucketsPath      string
+	GrpcDialOption   grpc.DialOption
 }
 type S3ApiServer struct {

weed/security/tls.go (new file, 66 lines)

@@ -0,0 +1,66 @@
+package security
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"github.com/spf13/viper"
+	"io/ioutil"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+)
+
+func LoadServerTLS(config *viper.Viper, component string) grpc.ServerOption {
+	if config == nil {
+		return nil
+	}
+
+	// load cert/key, ca cert
+	cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
+	if err != nil {
+		glog.Errorf("load cert/key error: %v", err)
+		return nil
+	}
+	caCert, err := ioutil.ReadFile(config.GetString("ca"))
+	if err != nil {
+		glog.Errorf("read ca cert file error: %v", err)
+		return nil
+	}
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(caCert)
+	ta := credentials.NewTLS(&tls.Config{
+		Certificates: []tls.Certificate{cert},
+		ClientCAs:    caCertPool,
+		ClientAuth:   tls.RequireAndVerifyClientCert,
+	})
+
+	return grpc.Creds(ta)
+}
+
+func LoadClientTLS(config *viper.Viper, component string) grpc.DialOption {
+	if config == nil {
+		return grpc.WithInsecure()
+	}
+
+	// load cert/key, cacert
+	cert, err := tls.LoadX509KeyPair(config.GetString(component+".cert"), config.GetString(component+".key"))
+	if err != nil {
+		glog.Errorf("load cert/key error: %v", err)
+		return grpc.WithInsecure()
+	}
+	caCert, err := ioutil.ReadFile(config.GetString("ca"))
+	if err != nil {
+		glog.Errorf("read ca cert file error: %v", err)
+		return grpc.WithInsecure()
+	}
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(caCert)
+	ta := credentials.NewTLS(&tls.Config{
+		Certificates:       []tls.Certificate{cert},
+		RootCAs:            caCertPool,
+		InsecureSkipVerify: true,
+	})
+
+	return grpc.WithTransportCredentials(ta)
+}

@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"google.golang.org/grpc"
 	"net/http"
 	"path/filepath"
 	"strconv"
@@ -81,7 +82,7 @@ func debug(params ...interface{}) {
 	glog.V(4).Infoln(params...)
 }
-func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string) {
+func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl string, grpcDialOption grpc.DialOption) {
 	m := make(map[string]interface{})
 	if r.Method != "POST" {
 		writeJsonError(w, r, http.StatusMethodNotAllowed, errors.New("Only submit via POST!"))
@@ -111,7 +112,7 @@ func submitForClientHandler(w http.ResponseWriter, r *http.Request, masterUrl st
 		Collection: r.FormValue("collection"),
 		Ttl:        r.FormValue("ttl"),
 	}
-	assignResult, ae := operation.Assign(masterUrl, ar)
+	assignResult, ae := operation.Assign(masterUrl, grpcDialOption, ar)
 	if ae != nil {
 		writeJsonError(w, r, http.StatusInternalServerError, ae)
 		return

@@ -220,7 +220,7 @@ func (fs *FilerServer) AssignVolume(ctx context.Context, req *filer_pb.AssignVol
 			DataCenter:  "",
 		}
 	}
-	assignResult, err := operation.Assign(fs.filer.GetMaster(), assignRequest, altRequest)
+	assignResult, err := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, assignRequest, altRequest)
 	if err != nil {
 		return nil, fmt.Errorf("assign volume: %v", err)
 	}
@@ -254,7 +254,7 @@ func (fs *FilerServer) Statistics(ctx context.Context, req *filer_pb.StatisticsR
 		Ttl:         req.Ttl,
 	}
-	output, err := operation.Statistics(fs.filer.GetMaster(), input)
+	output, err := operation.Statistics(fs.filer.GetMaster(), fs.grpcDialOption, input)
 	if err != nil {
 		return nil, err
 	}

@@ -1,6 +1,7 @@
 package weed_server
 import (
+	"google.golang.org/grpc"
 	"net/http"
 	"os"
@@ -34,22 +35,24 @@ type FilerOption struct {
 }
 type FilerServer struct {
 	option         *FilerOption
 	secret         security.SigningKey
 	filer          *filer2.Filer
+	grpcDialOption grpc.DialOption
 }
 func NewFilerServer(defaultMux, readonlyMux *http.ServeMux, option *FilerOption) (fs *FilerServer, err error) {
 	fs = &FilerServer{
 		option:         option,
+		grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "filer"),
 	}
 	if len(option.Masters) == 0 {
 		glog.Fatal("master list is required!")
 	}
-	fs.filer = filer2.NewFiler(option.Masters)
+	fs.filer = filer2.NewFiler(option.Masters, fs.grpcDialOption)
 	go fs.filer.KeepConnectedToMaster()

@@ -51,7 +51,7 @@ func (fs *FilerServer) assignNewFileInfo(w http.ResponseWriter, r *http.Request,
 		}
 	}
-	assignResult, ae := operation.Assign(fs.filer.GetMaster(), ar, altRequest)
+	assignResult, ae := operation.Assign(fs.filer.GetMaster(), fs.grpcDialOption, ar, altRequest)
 	if ae != nil {
 		glog.Errorf("failing to assign a file id: %v", ae)
 		writeJsonError(w, r, http.StatusInternalServerError, ae)

@@ -76,7 +76,7 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
 	}
 	ms.vgLock.Lock()
 	if !ms.Topo.HasWritableVolume(option) {
-		if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil {
+		if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil {
 			ms.vgLock.Unlock()
 			return nil, fmt.Errorf("Cannot grow volume group! %v", err)
 		}

@@ -2,6 +2,7 @@ package weed_server
 import (
 	"fmt"
+	"google.golang.org/grpc"
 	"net/http"
 	"net/http/httputil"
 	"net/url"
@@ -37,6 +38,8 @@ type MasterServer struct {
 	// notifying clients
 	clientChansLock sync.RWMutex
 	clientChans     map[string]chan *master_pb.VolumeLocation
+	grpcDialOpiton grpc.DialOption
 }
 func NewMasterServer(r *mux.Router, port int, metaFolder string,
@@ -48,7 +51,6 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string,
 	whiteList []string,
 ) *MasterServer {
-	LoadConfiguration("security", false)
 	v := viper.GetViper()
 	signingKey := v.GetString("jwt.signing.key")
@@ -64,6 +66,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string,
 		defaultReplicaPlacement: defaultReplicaPlacement,
 		garbageThreshold:        garbageThreshold,
 		clientChans:             make(map[string]chan *master_pb.VolumeLocation),
+		grpcDialOpiton:          security.LoadClientTLS(v.Sub("grpc"), "master"),
 	}
 	ms.bounedLeaderChan = make(chan int, 16)
 	seq := sequence.NewMemorySequencer()
@@ -89,7 +92,7 @@ func NewMasterServer(r *mux.Router, port int, metaFolder string,
 	r.HandleFunc("/stats/memory", ms.guard.WhiteList(statsMemoryHandler))
 	r.HandleFunc("/{fileId}", ms.proxyToLeader(ms.redirectHandler))
-	ms.Topo.StartRefreshWritableVolumes(garbageThreshold, ms.preallocate)
+	ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOpiton, garbageThreshold, ms.preallocate)
 	return ms
 }

@@ -93,7 +93,7 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
 		ms.vgLock.Lock()
 		defer ms.vgLock.Unlock()
 		if !ms.Topo.HasWritableVolume(option) {
-			if _, err = ms.vg.AutomaticGrowByType(option, ms.Topo); err != nil {
+			if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOpiton, ms.Topo); err != nil {
 				writeJsonError(w, r, http.StatusInternalServerError,
 					fmt.Errorf("Cannot grow volume group! %v", err))
 				return

View file

@@ -24,7 +24,7 @@ func (ms *MasterServer) collectionDeleteHandler(w http.ResponseWriter, r *http.R
return
}
for _, server := range collection.ListVolumeServers() {
-err := operation.WithVolumeServerClient(server.Url(), func(client volume_server_pb.VolumeServerClient) error {
+err := operation.WithVolumeServerClient(server.Url(), ms.grpcDialOpiton, func(client volume_server_pb.VolumeServerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
@@ -60,7 +60,7 @@ func (ms *MasterServer) volumeVacuumHandler(w http.ResponseWriter, r *http.Reque
}
}
glog.Infoln("garbageThreshold =", gcThreshold)
-ms.Topo.Vacuum(gcThreshold, ms.preallocate)
+ms.Topo.Vacuum(ms.grpcDialOpiton, gcThreshold, ms.preallocate)
ms.dirStatusHandler(w, r)
}
@@ -76,7 +76,7 @@ func (ms *MasterServer) volumeGrowHandler(w http.ResponseWriter, r *http.Request
if ms.Topo.FreeSpace() < count*option.ReplicaPlacement.GetCopyCount() {
err = errors.New("Only " + strconv.Itoa(ms.Topo.FreeSpace()) + " volumes left! Not enough for " + strconv.Itoa(count*option.ReplicaPlacement.GetCopyCount()))
} else {
-count, err = ms.vg.GrowByCountAndType(count, option, ms.Topo)
+count, err = ms.vg.GrowByCountAndType(ms.grpcDialOpiton, count, option, ms.Topo)
}
} else {
err = errors.New("parameter count is not found")
@@ -126,13 +126,13 @@ func (ms *MasterServer) selfUrl(r *http.Request) string {
}
func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *http.Request) {
if ms.Topo.IsLeader() {
-submitForClientHandler(w, r, ms.selfUrl(r))
+submitForClientHandler(w, r, ms.selfUrl(r), ms.grpcDialOpiton)
} else {
masterUrl, err := ms.Topo.Leader()
if err != nil {
writeJsonError(w, r, http.StatusInternalServerError, err)
} else {
-submitForClientHandler(w, r, masterUrl)
+submitForClientHandler(w, r, masterUrl, ms.grpcDialOpiton)
}
}
}
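
Every handler in this file funnels through the same helper shape: take a volume server address plus the dial option, obtain a (possibly cached) connection, wrap it in the generated client, and hand it to a callback. A hedged sketch of that shape is below, reusing the util.WithCachedGrpcClient signature shown further down in this diff; how the gRPC address is derived from the volume server's public URL is glossed over, and the wrapper name is hypothetical.

    package operation_example

    import (
    	"google.golang.org/grpc"

    	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    	"github.com/chrislusf/seaweedfs/weed/util"
    )

    // withVolumeServerClientSketch assumes grpcAddress is already the volume
    // server's gRPC endpoint; the mutual-TLS credential rides in via grpcDialOption.
    func withVolumeServerClientSketch(grpcAddress string, grpcDialOption grpc.DialOption,
    	fn func(volume_server_pb.VolumeServerClient) error) error {

    	return util.WithCachedGrpcClient(func(conn *grpc.ClientConn) error {
    		return fn(volume_server_pb.NewVolumeServerClient(conn))
    	}, grpcAddress, grpcDialOption)
    }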


@@ -2,6 +2,9 @@ package weed_server
import (
"fmt"
+"github.com/chrislusf/seaweedfs/weed/security"
+"github.com/spf13/viper"
+"google.golang.org/grpc"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -19,6 +22,8 @@ func (vs *VolumeServer) heartbeat() {
vs.store.SetDataCenter(vs.dataCenter)
vs.store.SetRack(vs.rack)
+grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "volume")
var err error
var newLeader string
for {
@@ -31,7 +36,7 @@ func (vs *VolumeServer) heartbeat() {
glog.V(0).Infof("failed to parse master grpc %v", masterGrpcAddress)
continue
}
-newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, time.Duration(vs.pulseSeconds)*time.Second)
+newLeader, err = vs.doHeartbeat(master, masterGrpcAddress, grpcDialOption, time.Duration(vs.pulseSeconds)*time.Second)
if err != nil {
glog.V(0).Infof("heartbeat error: %v", err)
time.Sleep(time.Duration(vs.pulseSeconds) * time.Second)
@@ -40,9 +45,9 @@
}
}
}
-func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, sleepInterval time.Duration) (newLeader string, err error) {
+func (vs *VolumeServer) doHeartbeat(masterNode, masterGrpcAddress string, grpcDialOption grpc.DialOption, sleepInterval time.Duration) (newLeader string, err error) {
-grpcConection, err := util.GrpcDial(masterGrpcAddress)
+grpcConection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption)
if err != nil {
return "", fmt.Errorf("fail to dial %s : %v", masterNode, err)
}


@@ -1,6 +1,7 @@
package weed_server
import (
+"google.golang.org/grpc"
"net/http"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -10,13 +11,14 @@ import (
)
type VolumeServer struct {
MasterNodes []string
currentMaster string
pulseSeconds int
dataCenter string
rack string
store *storage.Store
guard *security.Guard
+grpcDialOption grpc.DialOption
needleMapKind storage.NeedleMapType
FixJpgOrientation bool
@@ -33,7 +35,6 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
fixJpgOrientation bool,
readRedirect bool) *VolumeServer {
-LoadConfiguration("security", false)
v := viper.GetViper()
signingKey := v.GetString("jwt.signing.key")
enableUiAccess := v.GetBool("access.ui")
@@ -45,6 +46,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
needleMapKind: needleMapKind,
FixJpgOrientation: fixJpgOrientation,
ReadRedirect: readRedirect,
+grpcDialOption: security.LoadClientTLS(viper.Sub("grpc"), "volume"),
}
vs.MasterNodes = masterNodes
vs.store = storage.NewStore(port, ip, publicUrl, folders, maxCounts, vs.needleMapKind)


@@ -95,7 +95,7 @@ func (vs *VolumeServer) DeleteHandler(w http.ResponseWriter, r *http.Request) {
return
}
// make sure all chunks had deleted before delete manifest
-if e := chunkManifest.DeleteChunks(vs.GetMaster()); e != nil {
+if e := chunkManifest.DeleteChunks(vs.GetMaster(), vs.grpcDialOption); e != nil {
writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("Delete chunks error: %v", e))
return
}


@@ -3,6 +3,7 @@ package storage
import (
"context"
"fmt"
+"google.golang.org/grpc"
"io"
"os"
"sort"
@@ -45,12 +46,12 @@ optimized more later).
*/
-func (v *Volume) Synchronize(volumeServer string) (err error) {
+func (v *Volume) Synchronize(volumeServer string, grpcDialOption grpc.DialOption) (err error) {
var lastCompactRevision uint16 = 0
var compactRevision uint16 = 0
var masterMap *needle.CompactMap
for i := 0; i < 3; i++ {
-if masterMap, _, compactRevision, err = fetchVolumeFileEntries(volumeServer, v.Id); err != nil {
+if masterMap, _, compactRevision, err = fetchVolumeFileEntries(volumeServer, grpcDialOption, v.Id); err != nil {
return fmt.Errorf("Failed to sync volume %d entries with %s: %v", v.Id, volumeServer, err)
}
if lastCompactRevision != compactRevision && lastCompactRevision != 0 {
@@ -62,7 +63,7 @@ func (v *Volume) Synchronize(volumeServer string) (err error) {
}
}
lastCompactRevision = compactRevision
-if err = v.trySynchronizing(volumeServer, masterMap, compactRevision); err == nil {
+if err = v.trySynchronizing(volumeServer, grpcDialOption, masterMap, compactRevision); err == nil {
return
}
}
@@ -77,7 +78,7 @@ func (a ByOffset) Less(i, j int) bool { return a[i].Offset < a[j].Offset }
// trySynchronizing sync with remote volume server incrementally by
// make up the local and remote delta.
-func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.CompactMap, compactRevision uint16) error {
+func (v *Volume) trySynchronizing(volumeServer string, grpcDialOption grpc.DialOption, masterMap *needle.CompactMap, compactRevision uint16) error {
slaveIdxFile, err := os.Open(v.nm.IndexFileName())
if err != nil {
return fmt.Errorf("Open volume %d index file: %v", v.Id, err)
@@ -126,7 +127,7 @@ func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.Compact
continue
}
// add master file entry to local data file
-if err := v.fetchNeedle(volumeServer, needleValue, compactRevision); err != nil {
+if err := v.fetchNeedle(volumeServer, grpcDialOption, needleValue, compactRevision); err != nil {
glog.V(0).Infof("Fetch needle %v from %s: %v", needleValue, volumeServer, err)
return err
}
@@ -136,16 +137,16 @@ func (v *Volume) trySynchronizing(volumeServer string, masterMap *needle.Compact
return nil
}
-func fetchVolumeFileEntries(volumeServer string, vid VolumeId) (m *needle.CompactMap, lastOffset uint64, compactRevision uint16, err error) {
+func fetchVolumeFileEntries(volumeServer string, grpcDialOption grpc.DialOption, vid VolumeId) (m *needle.CompactMap, lastOffset uint64, compactRevision uint16, err error) {
m = needle.NewCompactMap()
-syncStatus, err := operation.GetVolumeSyncStatus(volumeServer, uint32(vid))
+syncStatus, err := operation.GetVolumeSyncStatus(volumeServer, grpcDialOption, uint32(vid))
if err != nil {
return m, 0, 0, err
}
total := 0
-err = operation.GetVolumeIdxEntries(volumeServer, uint32(vid), func(key NeedleId, offset Offset, size uint32) {
+err = operation.GetVolumeIdxEntries(volumeServer, grpcDialOption, uint32(vid), func(key NeedleId, offset Offset, size uint32) {
// println("remote key", key, "offset", offset*NeedlePaddingSize, "size", size)
if offset > 0 && size != TombstoneFileSize {
m.Set(NeedleId(key), offset, size)
@@ -187,9 +188,9 @@ func (v *Volume) removeNeedle(key NeedleId) {
// fetchNeedle fetches a remote volume needle by vid, id, offset
// The compact revision is checked first in case the remote volume
// is compacted and the offset is invalid any more.
-func (v *Volume) fetchNeedle(volumeServer string, needleValue needle.NeedleValue, compactRevision uint16) error {
+func (v *Volume) fetchNeedle(volumeServer string, grpcDialOption grpc.DialOption, needleValue needle.NeedleValue, compactRevision uint16) error {
-return operation.WithVolumeServerClient(volumeServer, func(client volume_server_pb.VolumeServerClient) error {
+return operation.WithVolumeServerClient(volumeServer, grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
stream, err := client.VolumeSyncData(context.Background(), &volume_server_pb.VolumeSyncDataRequest{
VolumdId: uint32(v.Id),
Revision: uint32(compactRevision),
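
fetchNeedle is the one call in this file that opens a server-side stream rather than a unary RPC. For readers new to gRPC streaming, the receive side reduces to the loop sketched below; the stream type name follows the standard protoc-gen-go convention for the VolumeSyncData method, and the payload handling is deliberately left out because it is not visible in this excerpt.

    package storage_example

    import (
    	"io"

    	"github.com/chrislusf/seaweedfs/weed/pb/volume_server_pb"
    )

    // drainSyncStreamSketch reads VolumeSyncData responses until the server closes
    // the stream; the real fetchNeedle appends the received needle bytes to the
    // local volume instead of discarding them.
    func drainSyncStreamSketch(stream volume_server_pb.VolumeServer_VolumeSyncDataClient) error {
    	for {
    		resp, err := stream.Recv()
    		if err == io.EOF {
    			return nil // server finished sending
    		}
    		if err != nil {
    			return err
    		}
    		_ = resp // process the received chunk here
    	}
    }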


@@ -2,6 +2,7 @@ package topology
import (
"context"
+"google.golang.org/grpc"
"time"
"github.com/chrislusf/seaweedfs/weed/operation"
@@ -13,9 +14,9 @@ type AllocateVolumeResult struct {
Error string
}
-func AllocateVolume(dn *DataNode, vid storage.VolumeId, option *VolumeGrowOption) error {
+func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid storage.VolumeId, option *VolumeGrowOption) error {
-return operation.WithVolumeServerClient(dn.Url(), func(client volume_server_pb.VolumeServerClient) error {
+return operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(client volume_server_pb.VolumeServerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()


@@ -1,6 +1,7 @@
package topology
import (
+"google.golang.org/grpc"
"math/rand"
"time"
@@ -8,7 +9,7 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage"
)
-func (t *Topology) StartRefreshWritableVolumes(garbageThreshold float64, preallocate int64) {
+func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) {
go func() {
for {
if t.IsLeader() {
@@ -22,7 +23,7 @@ func (t *Topology) StartRefreshWritableVolumes(garbageThreshold float64, preallo
c := time.Tick(15 * time.Minute)
for _ = range c {
if t.IsLeader() {
-t.Vacuum(garbageThreshold, preallocate)
+t.Vacuum(grpcDialOption, garbageThreshold, preallocate)
}
}
}(garbageThreshold)


@@ -2,6 +2,7 @@ package topology
import (
"context"
+"google.golang.org/grpc"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
@@ -10,11 +11,11 @@ import (
"github.com/chrislusf/seaweedfs/weed/storage"
)
-func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool {
+func batchVacuumVolumeCheck(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, garbageThreshold float64) bool {
ch := make(chan bool, locationlist.Length())
for index, dn := range locationlist.list {
go func(index int, url string, vid storage.VolumeId) {
-err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(5*time.Second))
defer cancel()
@@ -46,13 +47,13 @@ func batchVacuumVolumeCheck(vl *VolumeLayout, vid storage.VolumeId, locationlist
}
return isCheckSuccess
}
-func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool {
+func batchVacuumVolumeCompact(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList, preallocate int64) bool {
vl.removeFromWritable(vid)
ch := make(chan bool, locationlist.Length())
for index, dn := range locationlist.list {
go func(index int, url string, vid storage.VolumeId) {
glog.V(0).Infoln(index, "Start vacuuming", vid, "on", url)
-err := operation.WithVolumeServerClient(url, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+err := operation.WithVolumeServerClient(url, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCompact(context.Background(), &volume_server_pb.VacuumVolumeCompactRequest{
VolumdId: uint32(vid),
})
@@ -79,11 +80,11 @@ func batchVacuumVolumeCompact(vl *VolumeLayout, vid storage.VolumeId, locationli
}
return isVacuumSuccess
}
-func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
+func batchVacuumVolumeCommit(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) bool {
isCommitSuccess := true
for _, dn := range locationlist.list {
glog.V(0).Infoln("Start Committing vacuum", vid, "on", dn.Url())
-err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCommit(context.Background(), &volume_server_pb.VacuumVolumeCommitRequest{
VolumdId: uint32(vid),
})
@@ -101,10 +102,10 @@ func batchVacuumVolumeCommit(vl *VolumeLayout, vid storage.VolumeId, locationlis
}
return isCommitSuccess
}
-func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) {
+func batchVacuumVolumeCleanup(grpcDialOption grpc.DialOption, vl *VolumeLayout, vid storage.VolumeId, locationlist *VolumeLocationList) {
for _, dn := range locationlist.list {
glog.V(0).Infoln("Start cleaning up", vid, "on", dn.Url())
-err := operation.WithVolumeServerClient(dn.Url(), func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+err := operation.WithVolumeServerClient(dn.Url(), grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
_, err := volumeServerClient.VacuumVolumeCleanup(context.Background(), &volume_server_pb.VacuumVolumeCleanupRequest{
VolumdId: uint32(vid),
})
@@ -118,21 +119,21 @@ func batchVacuumVolumeCleanup(vl *VolumeLayout, vid storage.VolumeId, locationli
}
}
-func (t *Topology) Vacuum(garbageThreshold float64, preallocate int64) int {
+func (t *Topology) Vacuum(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) int {
glog.V(1).Infof("Start vacuum on demand with threshold: %f", garbageThreshold)
for _, col := range t.collectionMap.Items() {
c := col.(*Collection)
for _, vl := range c.storageType2VolumeLayout.Items() {
if vl != nil {
volumeLayout := vl.(*VolumeLayout)
-vacuumOneVolumeLayout(volumeLayout, c, garbageThreshold, preallocate)
+vacuumOneVolumeLayout(grpcDialOption, volumeLayout, c, garbageThreshold, preallocate)
}
}
}
return 0
}
-func vacuumOneVolumeLayout(volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) {
+func vacuumOneVolumeLayout(grpcDialOption grpc.DialOption, volumeLayout *VolumeLayout, c *Collection, garbageThreshold float64, preallocate int64) {
volumeLayout.accessLock.RLock()
tmpMap := make(map[storage.VolumeId]*VolumeLocationList)
@@ -152,11 +153,11 @@ func vacuumOneVolumeLayout(volumeLayout *VolumeLayout, c *Collection, garbageThr
}
glog.V(2).Infof("check vacuum on collection:%s volume:%d", c.Name, vid)
-if batchVacuumVolumeCheck(volumeLayout, vid, locationList, garbageThreshold) {
+if batchVacuumVolumeCheck(grpcDialOption, volumeLayout, vid, locationList, garbageThreshold) {
-if batchVacuumVolumeCompact(volumeLayout, vid, locationList, preallocate) {
+if batchVacuumVolumeCompact(grpcDialOption, volumeLayout, vid, locationList, preallocate) {
-batchVacuumVolumeCommit(volumeLayout, vid, locationList)
+batchVacuumVolumeCommit(grpcDialOption, volumeLayout, vid, locationList)
} else {
-batchVacuumVolumeCleanup(volumeLayout, vid, locationList)
+batchVacuumVolumeCleanup(grpcDialOption, volumeLayout, vid, locationList)
}
}
}
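
Stepping back, the vacuum path is a four-stage pipeline (check, compact, commit, cleanup), and this commit simply threads the dial option through each stage. The sketch below condenses that control flow; the stage functions are taken as parameters so the fragment stands alone, but in SeaweedFS they are the batchVacuumVolume* functions above, each now taking grpc.DialOption as its first argument.

    package topology_example

    import "google.golang.org/grpc"

    // vacuumPipelineSketch: only volumes whose garbage ratio passes the check are
    // compacted; a compaction is committed only if every replica succeeded,
    // otherwise the partial result is cleaned up.
    func vacuumPipelineSketch(
    	grpcDialOption grpc.DialOption,
    	check func(grpc.DialOption) bool,
    	compact func(grpc.DialOption) bool,
    	commit func(grpc.DialOption) bool,
    	cleanup func(grpc.DialOption),
    ) {
    	if !check(grpcDialOption) {
    		return
    	}
    	if compact(grpcDialOption) {
    		commit(grpcDialOption)
    	} else {
    		cleanup(grpcDialOption)
    	}
    }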


@@ -2,6 +2,7 @@ package topology
import (
"fmt"
+"google.golang.org/grpc"
"math/rand"
"sync"
@@ -55,19 +56,19 @@ func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
return
}
-func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, topo *Topology) (count int, err error) {
+func (vg *VolumeGrowth) AutomaticGrowByType(option *VolumeGrowOption, grpcDialOption grpc.DialOption, topo *Topology) (count int, err error) {
-count, err = vg.GrowByCountAndType(vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()), option, topo)
+count, err = vg.GrowByCountAndType(grpcDialOption, vg.findVolumeCount(option.ReplicaPlacement.GetCopyCount()), option, topo)
if count > 0 && count%option.ReplicaPlacement.GetCopyCount() == 0 {
return count, nil
}
return count, err
}
-func (vg *VolumeGrowth) GrowByCountAndType(targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) {
+func (vg *VolumeGrowth) GrowByCountAndType(grpcDialOption grpc.DialOption, targetCount int, option *VolumeGrowOption, topo *Topology) (counter int, err error) {
vg.accessLock.Lock()
defer vg.accessLock.Unlock()
for i := 0; i < targetCount; i++ {
-if c, e := vg.findAndGrow(topo, option); e == nil {
+if c, e := vg.findAndGrow(grpcDialOption, topo, option); e == nil {
counter += c
} else {
return counter, e
@@ -76,13 +77,13 @@ func (vg *VolumeGrowth) GrowByCountAndType(targetCount int, option *VolumeGrowOp
return
}
-func (vg *VolumeGrowth) findAndGrow(topo *Topology, option *VolumeGrowOption) (int, error) {
+func (vg *VolumeGrowth) findAndGrow(grpcDialOption grpc.DialOption, topo *Topology, option *VolumeGrowOption) (int, error) {
servers, e := vg.findEmptySlotsForOneVolume(topo, option)
if e != nil {
return 0, e
}
vid := topo.NextVolumeId()
-err := vg.grow(topo, vid, option, servers...)
+err := vg.grow(grpcDialOption, topo, vid, option, servers...)
return len(servers), err
}
@@ -189,9 +190,9 @@ func (vg *VolumeGrowth) findEmptySlotsForOneVolume(topo *Topology, option *Volum
return
}
-func (vg *VolumeGrowth) grow(topo *Topology, vid storage.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error {
+func (vg *VolumeGrowth) grow(grpcDialOption grpc.DialOption, topo *Topology, vid storage.VolumeId, option *VolumeGrowOption, servers ...*DataNode) error {
for _, server := range servers {
-if err := AllocateVolume(server, vid, option); err == nil {
+if err := AllocateVolume(server, grpcDialOption, vid, option); err == nil {
vi := storage.VolumeInfo{
Id: vid,
Size: 0,


@@ -38,7 +38,7 @@ func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
// opts = append(opts, grpc.WithTimeout(time.Duration(5*time.Second)))
var options []grpc.DialOption
options = append(options,
-grpc.WithInsecure(),
+// grpc.WithInsecure(),
grpc.WithKeepaliveParams(keepalive.ClientParameters{
Time: 30 * time.Second, // client ping server if no activity for this long
Timeout: 20 * time.Second,
@@ -48,7 +48,7 @@ func GrpcDial(address string, opts ...grpc.DialOption) (*grpc.ClientConn, error)
options = append(options, opt)
}
}
-return grpc.Dial(address, opts...)
+return grpc.Dial(address, options...)
}
func WithCachedGrpcClient(fn func(*grpc.ClientConn) error, address string, opts ...grpc.DialOption) error {
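
With grpc.WithInsecure() commented out of the defaults (and the opts/options mix-up on the final grpc.Dial fixed so the caller's options are actually used), the transport credential must now come from the caller. A minimal hedged usage sketch follows; it assumes the security configuration has already been read into viper, as the commands touched by this commit do, and the master address is a placeholder rather than anything taken from the commit.

    package main

    import (
    	"log"

    	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
    	"github.com/chrislusf/seaweedfs/weed/security"
    	"github.com/chrislusf/seaweedfs/weed/util"
    	"github.com/spf13/viper"
    )

    func main() {
    	// build the client credential once from the [grpc] config section
    	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")

    	// "localhost:19333" stands in for a master gRPC address
    	conn, err := util.GrpcDial("localhost:19333", grpcDialOption)
    	if err != nil {
    		log.Fatalf("dial master: %v", err)
    	}
    	defer conn.Close()

    	client := master_pb.NewSeaweedClient(conn)
    	_ = client // issue RPCs over the mutually authenticated connection
    }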


@@ -3,29 +3,32 @@ package wdclient
import (
"context"
"fmt"
+"math/rand"
"time"
"github.com/chrislusf/seaweedfs/weed/glog"
"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
"github.com/chrislusf/seaweedfs/weed/util"
-"math/rand"
+"google.golang.org/grpc"
)
type MasterClient struct {
ctx context.Context
name string
currentMaster string
masters []string
+grpcDialOption grpc.DialOption
vidMap
}
-func NewMasterClient(ctx context.Context, clientName string, masters []string) *MasterClient {
+func NewMasterClient(ctx context.Context, grpcDialOption grpc.DialOption, clientName string, masters []string) *MasterClient {
return &MasterClient{
ctx: ctx,
name: clientName,
masters: masters,
-vidMap: newVidMap(),
+grpcDialOption: grpcDialOption,
+vidMap: newVidMap(),
}
}
@@ -50,7 +53,7 @@ func (mc *MasterClient) KeepConnectedToMaster() {
func (mc *MasterClient) tryAllMasters() {
for _, master := range mc.masters {
glog.V(0).Infof("Connecting to master %v", master)
-gprcErr := withMasterClient(master, func(client master_pb.SeaweedClient) error {
+gprcErr := withMasterClient(master, mc.grpcDialOption, func(client master_pb.SeaweedClient) error {
stream, err := client.KeepConnected(context.Background())
if err != nil {
@@ -96,14 +99,14 @@ func (mc *MasterClient) tryAllMasters() {
}
}
-func withMasterClient(master string, fn func(client master_pb.SeaweedClient) error) error {
+func withMasterClient(master string, grpcDialOption grpc.DialOption, fn func(client master_pb.SeaweedClient) error) error {
masterGrpcAddress, parseErr := util.ParseServerToGrpcAddress(master, 0)
if parseErr != nil {
return fmt.Errorf("failed to parse master grpc %v", master)
}
-grpcConnection, err := util.GrpcDial(masterGrpcAddress)
+grpcConnection, err := util.GrpcDial(masterGrpcAddress, grpcDialOption)
if err != nil {
return fmt.Errorf("fail to dial %s: %v", master, err)
}
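
With the thin wdclient.SeaweedClient wrapper deleted in the final hunk below, callers construct a MasterClient directly and hand it the dial option up front. A hedged usage sketch, with placeholder client name and master address:

    package main

    import (
    	"context"

    	"github.com/chrislusf/seaweedfs/weed/security"
    	"github.com/chrislusf/seaweedfs/weed/wdclient"
    	"github.com/spf13/viper"
    )

    func main() {
    	// assumes the security config has already been read into viper
    	grpcDialOption := security.LoadClientTLS(viper.Sub("grpc"), "client")

    	mc := wdclient.NewMasterClient(context.Background(), grpcDialOption, "example-client", []string{"localhost:9333"})
    	go mc.KeepConnectedToMaster()

    	// ... look up volume locations through mc ...
    	select {} // keep this toy example alive
    }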


@@ -1,15 +0,0 @@
-package wdclient
-import (
-"context"
-)
-type SeaweedClient struct {
-*MasterClient
-}
-func NewSeaweedClient(ctx context.Context, clientName string, masters []string) *SeaweedClient {
-return &SeaweedClient{
-MasterClient: NewMasterClient(ctx, clientName, masters),
-}
-}