
fix ec related bugs

Chris Lu 2019-06-05 23:20:26 -07:00
parent 450f4733ce
commit d344e0a035
9 changed files with 51 additions and 9 deletions

View file

@@ -76,7 +76,7 @@ func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
 	}
 	glog.V(4).Infoln("read bytes", count, "error", err)
 	if err != nil || count < 0 {
-		glog.V(0).Infof("read %s error: %v", r.URL.Path, err)
+		glog.V(0).Infof("read %s isNormalVolume %v error: %v", r.URL.Path, hasVolume, err)
 		w.WriteHeader(http.StatusNotFound)
 		return
 	}

View file

@@ -199,6 +199,7 @@ func doDeduplicateEcShards(ctx context.Context, commandEnv *CommandEnv, collecti
 			if err := sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, ecNode.info.Id, duplicatedShardIds); err != nil {
 				return err
 			}
+			deleteEcVolumeShards(ecNode, vid, duplicatedShardIds)
 			ecNode.freeEcSlot++
 		}
 	}
@@ -273,3 +274,27 @@ func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.Shar
 	return 0
 }
+
+func addEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32) {
+	for _, shardInfo := range ecNode.info.EcShardInfos {
+		if needle.VolumeId(shardInfo.Id) == vid {
+			for _, shardId := range shardIds {
+				shardInfo.EcIndexBits = uint32(erasure_coding.ShardBits(shardInfo.EcIndexBits).AddShardId(erasure_coding.ShardId(shardId)))
+			}
+		}
+	}
+}
+
+func deleteEcVolumeShards(ecNode *EcNode, vid needle.VolumeId, shardIds []uint32) {
+	for _, shardInfo := range ecNode.info.EcShardInfos {
+		if needle.VolumeId(shardInfo.Id) == vid {
+			for _, shardId := range shardIds {
+				shardInfo.EcIndexBits = uint32(erasure_coding.ShardBits(shardInfo.EcIndexBits).RemoveShardId(erasure_coding.ShardId(shardId)))
+			}
+		}
+	}
+}
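The two new helpers keep the shell command's in-memory view of each EcNode in step with what was just done on the volume servers. EcIndexBits is treated as a per-volume bitmask of which shards a node holds, so adding or removing a shard is a single bit flip. A minimal sketch of that idea, assuming the same one-bit-per-shard layout (the ShardBits type below is a simplified stand-in, not the real erasure_coding implementation):

package main

import "fmt"

// ShardBits is a simplified stand-in: one bit per EC shard id.
type ShardBits uint32

// AddShardId sets the bit for the given shard id.
func (b ShardBits) AddShardId(id uint32) ShardBits { return b | (1 << id) }

// RemoveShardId clears the bit for the given shard id.
func (b ShardBits) RemoveShardId(id uint32) ShardBits { return b &^ (1 << id) }

func main() {
	var bits ShardBits
	bits = bits.AddShardId(3).AddShardId(7) // node now holds shards 3 and 7
	bits = bits.RemoveShardId(3)            // shard 3 was moved away
	fmt.Printf("%010b\n", bits)             // 0010000000
}

With that representation, calling deleteEcVolumeShards right after a successful sourceServerDeleteEcShards keeps shard placement and the freeEcSlot accounting consistent for the rest of the balancing run.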

View file

@@ -35,7 +35,14 @@ func moveMountedShardToEcNode(ctx context.Context, commandEnv *CommandEnv, exist
 	}

 	// ask source node to delete the shard, and maybe the ecx file
-	return sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
+	err = sourceServerDeleteEcShards(ctx, commandEnv.option.GrpcDialOption, collection, vid, existingLocation.info.Id, copiedShardIds)
+	if err != nil {
+		return err
+	}
+
+	deleteEcVolumeShards(existingLocation, vid, copiedShardIds)
+
+	return nil
 }

View file

@@ -163,6 +163,8 @@ func rebuildOneEcVolume(ctx context.Context, commandEnv *CommandEnv, rebuilder *
 		return err
 	}
+	addEcVolumeShards(rebuilder, volumeId, generatedShardIds)
+
 	return nil
 }

View file

@@ -223,8 +223,8 @@ func (l *DiskLocation) Close() {
 	l.Unlock()

 	l.ecVolumesLock.Lock()
-	for _, shards := range l.ecVolumes {
-		shards.Close()
+	for _, ecVolume := range l.ecVolumes {
+		ecVolume.Close()
 	}
 	l.ecVolumesLock.Unlock()

View file

@@ -87,8 +87,8 @@ func (l *DiskLocation) UnloadEcShard(vid needle.VolumeId, shardId erasure_coding
 	if _, deleted := ecVolume.DeleteEcVolumeShard(shardId); deleted {
 		if len(ecVolume.Shards) == 0 {
 			delete(l.ecVolumes, vid)
+			ecVolume.Close()
 		}
-		ecVolume.Close()
 		return true
 	}
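The Close() call moves inside the len(ecVolume.Shards) == 0 branch, so the shared index is only released once the last shard of the volume is unloaded; closing it while other shards are still mounted would break their subsequent reads. A rough sketch of the pattern with simplified types (this EcVolume is a stand-in, not the real struct):

package main

import "fmt"

// EcVolume is a simplified stand-in: it tracks loaded shards and whether
// the shared index handle is still open.
type EcVolume struct {
	Shards map[int]bool
	open   bool
}

func (v *EcVolume) Close() { v.open = false }

// unloadShard removes one shard and closes the volume only when the last
// shard is gone, so readers of the remaining shards keep a valid handle.
func unloadShard(volumes map[uint32]*EcVolume, vid uint32, shardId int) {
	v, ok := volumes[vid]
	if !ok {
		return
	}
	delete(v.Shards, shardId)
	if len(v.Shards) == 0 {
		delete(volumes, vid)
		v.Close()
	}
}

func main() {
	volumes := map[uint32]*EcVolume{7: {Shards: map[int]bool{0: true, 1: true}, open: true}}
	unloadShard(volumes, 7, 0)
	fmt.Println(volumes[7].open) // true: shard 1 is still loaded, index stays open
	unloadShard(volumes, 7, 1)
	fmt.Println(len(volumes)) // 0: last shard gone, volume removed and closed
}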

View file

@@ -155,7 +155,7 @@ func (ev *EcVolume) LocateEcShardNeedle(n *needle.Needle, version needle.Version
 	// find the needle from ecx file
 	offset, size, err = ev.findNeedleFromEcx(n.Id)
 	if err != nil {
-		return types.Offset{}, 0, nil, err
+		return types.Offset{}, 0, nil, fmt.Errorf("findNeedleFromEcx: %v", err)
 	}

 	shard := ev.Shards[0]
@@ -173,7 +173,7 @@ func (ev *EcVolume) findNeedleFromEcx(needleId types.NeedleId) (offset types.Off
 	for l < h {
 		m := (l + h) / 2
 		if _, err := ev.ecxFile.ReadAt(buf, m*types.NeedleMapEntrySize); err != nil {
-			return types.Offset{}, 0, err
+			return types.Offset{}, 0, fmt.Errorf("ecx file %d read at %d: %v", ev.ecxFileSize, m*types.NeedleMapEntrySize, err)
 		}
 		key, offset, size = idx.IdxFileEntry(buf)
 		if key == needleId {
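For context, findNeedleFromEcx binary-searches the sorted, fixed-size entries of the .ecx index file, which is why the read offset is m*types.NeedleMapEntrySize; the richer error now records both the file size and the offending offset. A self-contained sketch of that lookup shape, with an assumed 16-byte entry layout (8-byte big-endian key followed by offset/size bytes), not the actual SeaweedFS encoding:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

const entrySize = 16 // assumed: 8-byte key + 8 bytes of offset/size

// findEntry binary-searches a sorted index of fixed-size entries and
// returns the byte position of the matching entry.
func findEntry(f io.ReaderAt, totalSize int64, key uint64) (int64, error) {
	buf := make([]byte, entrySize)
	l, h := int64(0), totalSize/entrySize
	for l < h {
		m := (l + h) / 2
		if _, err := f.ReadAt(buf, m*entrySize); err != nil {
			return 0, fmt.Errorf("index read at %d: %v", m*entrySize, err)
		}
		k := binary.BigEndian.Uint64(buf[:8])
		switch {
		case k == key:
			return m * entrySize, nil
		case k < key:
			l = m + 1
		default:
			h = m
		}
	}
	return 0, fmt.Errorf("key %d not found", key)
}

func main() {
	// Three sorted entries with keys 1, 5, 9.
	var idx bytes.Buffer
	for _, k := range []uint64{1, 5, 9} {
		e := make([]byte, entrySize)
		binary.BigEndian.PutUint64(e[:8], k)
		idx.Write(e)
	}
	pos, err := findEntry(bytes.NewReader(idx.Bytes()), int64(idx.Len()), 5)
	fmt.Println(pos, err) // 16 <nil>
}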

View file

@@ -118,7 +118,7 @@ func (s *Store) ReadEcShardNeedle(ctx context.Context, vid needle.VolumeId, n *n
 		offset, size, intervals, err := localEcVolume.LocateEcShardNeedle(n, version)
 		if err != nil {
-			return 0, err
+			return 0, fmt.Errorf("locate in local ec volume: %v", err)
 		}

 		glog.V(4).Infof("read ec volume %d offset %d size %d intervals:%+v", vid, offset.ToAcutalOffset(), size, intervals)
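The three fmt.Errorf changes in this commit (here, in LocateEcShardNeedle, and in findNeedleFromEcx) are about traceability: each layer of an EC read prepends its own context, so a single log line shows where the failure started. A small sketch of how the wrapped messages compose (the function bodies are placeholders; only the wrapping mirrors the diff):

package main

import (
	"errors"
	"fmt"
)

// readIndex stands in for the low-level ecx read that can fail.
func readIndex() error { return errors.New("ecx file 1048576 read at 4096: unexpected EOF") }

// locate mirrors LocateEcShardNeedle adding its own context.
func locate() error {
	if err := readIndex(); err != nil {
		return fmt.Errorf("findNeedleFromEcx: %v", err)
	}
	return nil
}

// readNeedle mirrors ReadEcShardNeedle adding the outermost context.
func readNeedle() error {
	if err := locate(); err != nil {
		return fmt.Errorf("locate in local ec volume: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(readNeedle())
	// locate in local ec volume: findNeedleFromEcx: ecx file 1048576 read at 4096: unexpected EOF
}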

View file

@@ -78,7 +78,7 @@ func (t *Topology) Leader() (string, error) {
 	return l, nil
 }

-func (t *Topology) Lookup(collection string, vid needle.VolumeId) []*DataNode {
+func (t *Topology) Lookup(collection string, vid needle.VolumeId) (dataNodes []*DataNode) {
 	//maybe an issue if lots of collections?
 	if collection == "" {
 		for _, c := range t.collectionMap.Items() {
@@ -91,6 +91,14 @@ func (t *Topology) Lookup(collection string, vid needle.VolumeId) []*DataNode {
 			return c.(*Collection).Lookup(vid)
 		}
 	}

+	if locations, found := t.LookupEcShards(vid); found {
+		for _, loc := range locations.Locations {
+			dataNodes = append(dataNodes, loc...)
+		}
+		return dataNodes
+	}
+
 	return nil
 }
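With this change a volume id that has been converted to erasure coding is still resolvable through the normal lookup path: when no regular replica is found, the per-shard location lists are flattened into a single slice of data nodes (hence the switch to the named dataNodes return value). A small sketch of just that flattening step, with a simplified DataNode type (LookupEcShards and the Locations layout are inferred from the diff):

package main

import "fmt"

// DataNode is a simplified stand-in for the topology type.
type DataNode struct{ Id string }

// flatten collects every node holding any shard of the volume into one list,
// mirroring the appended loop added to Topology.Lookup above.
func flatten(perShard [][]*DataNode) (dataNodes []*DataNode) {
	for _, loc := range perShard {
		dataNodes = append(dataNodes, loc...)
	}
	return dataNodes
}

func main() {
	nodes := flatten([][]*DataNode{
		{{Id: "dn1"}, {Id: "dn2"}}, // locations of shard 0
		{{Id: "dn3"}},              // locations of shard 1
	})
	fmt.Println(len(nodes)) // 3
}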