commit c5f38c365d
parent df85f7a1eb
Chris Lu 2021-08-08 22:30:36 -07:00
12 changed files with 24 additions and 27 deletions

@@ -107,12 +107,12 @@ func doFilerBackup(grpcDialOption grpc.DialOption, backupOption *FilerBackupOpti
 	processEventFn := genProcessFunction(sourcePath, targetPath, dataSink, debug)
-	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3 * time.Second, func(counter int64, lastTsNs int64) error {
+	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		glog.V(0).Infof("backup %s progressed to %v %0.2f/sec", sourceFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
 		return setOffset(grpcDialOption, sourceFiler, BackupKeyPrefix, int32(sinkId), lastTsNs)
 	})
-	return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_" + dataSink.GetName(),
+	return pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_"+dataSink.GetName(),
 		sourcePath, startFrom.UnixNano(), 0, processEventFnWithOffset, false)
 }
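This hunk shows the change that repeats through most of the commit: gofmt drops the spaces around a binary operator once the expression sits inside a multi-line argument list, which is why 3 * time.Second becomes 3*time.Second and "backup_" + dataSink.GetName() becomes "backup_"+dataSink.GetName(). A minimal sketch that reproduces this with the standard go/format package (the snippet is illustrative, not part of the commit; pb, processEventFn, and the other identifiers are stand-ins that only need to parse):

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Input shaped like the removed lines above: operators spaced,
	// call arguments spanning more than one line.
	src := []byte(`package x

func f() {
	pb.AddOffsetFunc(processEventFn, 3 * time.Second,
		fn)
	pb.FollowMetadata(sourceFiler, grpcDialOption, "backup_" + dataSink.GetName(),
		sourcePath)
}
`)
	out, err := format.Source(src) // the same formatter gofmt applies
	if err != nil {
		panic(err)
	}
	// Expected to print 3*time.Second and "backup_"+dataSink.GetName(),
	// matching the added lines above.
	fmt.Print(string(out))
}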

@@ -189,7 +189,7 @@ func (metaBackup *FilerMetaBackupOptions) streamMetadataBackup() error {
 		return nil
 	}
-	processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3 * time.Second, func(counter int64, lastTsNs int64) error {
+	processEventFnWithOffset := pb.AddOffsetFunc(eachEntryFunc, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		lastTime := time.Unix(0, lastTsNs)
 		glog.V(0).Infof("meta backup %s progressed to %v %0.2f/sec", *metaBackup.filerAddress, lastTime, float64(counter)/float64(3))
 		return metaBackup.setOffset(lastTime)

@@ -252,9 +252,9 @@ func updateLocalEntry(filerClient filer_pb.FilerClient, dir string, entry *filer
 	entry.RemoteEntry = remoteEntry
 	return filerClient.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 		_, err := client.UpdateEntry(context.Background(), &filer_pb.UpdateEntryRequest{
-			Directory: dir,
-			Entry: entry,
+			Directory: dir,
+			Entry:     entry,
 		})
 		return err
 	})
-}
+}
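The struct-literal hunks here and below (HeadObjectInput, the sequencer constants, RemoteConf) are gofmt's alignment pass: values in a multi-line composite literal or const block are padded so they start in the same column. A self-contained sketch of the same effect, using a hypothetical UpdateEntryRequest type rather than the real filer_pb one:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Values deliberately unaligned, like the removed lines above.
	src := []byte(`package x

var req = &UpdateEntryRequest{
	Directory: dir,
	Entry: entry,
}
`)
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	// gofmt pads the shorter key, so "Entry:" gains spaces and both
	// values line up in one column, as in the added lines above.
	fmt.Print(string(out))
}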

@@ -165,12 +165,12 @@ func doSubscribeFilerMetaChanges(grpcDialOption grpc.DialOption, sourceFiler, so
 		return persistEventFn(resp)
 	}
-	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3 * time.Second, func(counter int64, lastTsNs int64) error {
+	processEventFnWithOffset := pb.AddOffsetFunc(processEventFn, 3*time.Second, func(counter int64, lastTsNs int64) error {
 		glog.V(0).Infof("sync %s to %s progressed to %v %0.2f/sec", sourceFiler, targetFiler, time.Unix(0, lastTsNs), float64(counter)/float64(3))
 		return setOffset(grpcDialOption, targetFiler, SyncKeyPrefix, sourceFilerSignature, lastTsNs)
 	})
-	return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_" + targetFiler,
+	return pb.FollowMetadata(sourceFiler, grpcDialOption, "syncTo_"+targetFiler,
 		sourcePath, sourceFilerOffsetTsNs, targetFilerSignature, processEventFnWithOffset, false)
 }

@@ -27,5 +27,4 @@ import (
 	_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
 	_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
 	_ "github.com/chrislusf/seaweedfs/weed/filer/sqlite"
-
-)
+)

@@ -160,8 +160,6 @@ func RemoveRemoteStorageMapping(oldContent []byte, dir string) (newContent []byt
 	return
 }
-
-

 func ReadMountMappings(grpcDialOption grpc.DialOption, filerAddress string) (mappings *filer_pb.RemoteStorageMapping, readErr error) {
 	var oldContent []byte
 	if readErr = pb.WithFilerClient(filerAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {

@@ -31,4 +31,4 @@ func TestFilerRemoteStorage_FindRemoteStorageClient(t *testing.T) {
 	_, _, found4 := rs.FindRemoteStorageClient("/a/b/cc")
 	assert.Equal(t, false, found4, "should not find storage client")
-}
+}

@@ -9,7 +9,7 @@ func (entry *Entry) IsInRemoteOnly() bool {
 	return len(entry.Chunks) == 0 && entry.Remote != nil && entry.Remote.Size > 0
 }
-func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data[]byte, err error) {
+func (f *Filer) ReadRemote(entry *Entry, offset int64, size int64) (data []byte, err error) {
 	client, _, found := f.RemoteStorage.GetRemoteStorageClient(entry.Remote.StorageName)
 	if !found {
 		return nil, fmt.Errorf("remote storage %v not found", entry.Remote.StorageName)

@@ -174,13 +174,13 @@ func toTagging(attributes map[string][]byte) *s3.Tagging {
 func (s *s3RemoteStorageClient) readFileRemoteEntry(loc *filer_pb.RemoteStorageLocation) (*filer_pb.RemoteEntry, error) {
 	resp, err := s.conn.HeadObject(&s3.HeadObjectInput{
-		Bucket: aws.String(loc.Bucket),
-		Key: aws.String(loc.Path[1:]),
+		Bucket: aws.String(loc.Bucket),
+		Key:    aws.String(loc.Path[1:]),
 	})
 	if err != nil {
 		return nil, err
 	}
 	return &filer_pb.RemoteEntry{
 		LastModifiedAt: resp.LastModified.Unix(),
 		Size:           *resp.ContentLength,
@@ -200,8 +200,8 @@ func (s *s3RemoteStorageClient) UpdateFileMetadata(loc *filer_pb.RemoteStorageLo
 		})
 	} else {
 		_, err = s.conn.DeleteObjectTagging(&s3.DeleteObjectTaggingInput{
-			Bucket: aws.String(loc.Bucket),
-			Key: aws.String(loc.Path[1:]),
+			Bucket: aws.String(loc.Bucket),
+			Key:    aws.String(loc.Path[1:]),
 		})
 	}
 	return

@@ -26,8 +26,8 @@ import (
 )

 const (
-	SequencerType = "master.sequencer.type"
-	SequencerEtcdUrls = "master.sequencer.sequencer_etcd_urls"
+	SequencerType        = "master.sequencer.type"
+	SequencerEtcdUrls    = "master.sequencer.sequencer_etcd_urls"
 	SequencerSnowflakeId = "master.sequencer.sequencer_snowflake_id"
 )

@@ -18,16 +18,16 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
 	}
 	remoteConf := &filer_pb.RemoteConf{
-		Type: req.RemoteType,
-		Name: req.RemoteName,
+		Type:        req.RemoteType,
+		Name:        req.RemoteName,
 		S3AccessKey: req.S3AccessKey,
 		S3SecretKey: req.S3SecretKey,
-		S3Region: req.S3Region,
-		S3Endpoint: req.S3Endpoint,
+		S3Region:    req.S3Region,
+		S3Endpoint:  req.S3Endpoint,
 	}
 	client, getClientErr := remote_storage.GetRemoteStorage(remoteConf)
-	if getClientErr != nil {
+	if getClientErr != nil {
 		return nil, fmt.Errorf("get remote client: %v", getClientErr)
 	}

@@ -187,7 +187,7 @@ func TestBalance(t *testing.T) {
 func TestVolumeSelection(t *testing.T) {
 	topologyInfo := parseOutput(topoData)
-	vids, err := collectVolumeIdsForTierChange(nil, topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0);
+	vids, err := collectVolumeIdsForTierChange(nil, topologyInfo, 1000, types.ToDiskType("hdd"), "", 20.0, 0)
 	if err != nil {
 		t.Errorf("collectVolumeIdsForTierChange: %v", err)
 	}
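The final hunk is another purely mechanical fix: Go's parser reads the trailing semicolon as a redundant statement terminator, and gofmt deletes it. The same go/format check as in the sketches above, with a hypothetical collect function standing in for collectVolumeIdsForTierChange:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	src := []byte(`package x

func f() {
	vids, err := collect();
	_, _ = vids, err
}
`)
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the redundant ";" after collect() is removed
}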