From 866197eee3ba3348ab4a1e0cc3f60338f3b9c3a6 Mon Sep 17 00:00:00 2001
From: Chris Lu
Date: Thu, 30 May 2019 01:38:59 -0700
Subject: [PATCH] print out the ec balancing plan

---
 weed/shell/command_ec_balance.go | 245 +++++++++++++++++++++++++++++++
 weed/shell/command_ec_encode.go  | 102 ++++++++-----
 2 files changed, 308 insertions(+), 39 deletions(-)
 create mode 100644 weed/shell/command_ec_balance.go

diff --git a/weed/shell/command_ec_balance.go b/weed/shell/command_ec_balance.go
new file mode 100644
index 000000000..b2e3ccaea
--- /dev/null
+++ b/weed/shell/command_ec_balance.go
@@ -0,0 +1,245 @@
+package shell
+
+import (
+    "context"
+    "flag"
+    "fmt"
+    "io"
+    "math"
+
+    "github.com/chrislusf/seaweedfs/weed/pb/master_pb"
+    "github.com/chrislusf/seaweedfs/weed/storage/erasure_coding"
+    "github.com/chrislusf/seaweedfs/weed/storage/needle"
+)
+
+func init() {
+    commands = append(commands, &commandEcBalance{})
+}
+
+type commandEcBalance struct {
+}
+
+func (c *commandEcBalance) Name() string {
+    return "ec.balance"
+}
+
+func (c *commandEcBalance) Help() string {
+    return `balance all ec shards among volume servers
+
+    ec.balance [-c ALL|EACH_COLLECTION|<collection_name>] [-f]
+
+    Algorithm:
+
+    For each type of volume server (different max volume count limit) {
+        for each collection {
+            balanceEcVolumes()
+        }
+    }
+
+    func balanceEcVolumes() {
+        idealWritableVolumes = totalWritableVolumes / numVolumeServers
+        for {
+            sort all volume servers ordered by the number of local writable volumes
+            pick the volume server A with the lowest number of writable volumes x
+            pick the volume server B with the highest number of writable volumes y
+            if y > idealWritableVolumes and x + 1 <= idealWritableVolumes {
+                if B has a writable volume id v that A does not have {
+                    move writable volume v from B to A
+                }
+            }
+        }
+    }
+
+`
+}
+
+func (c *commandEcBalance) Do(args []string, commandEnv *commandEnv, writer io.Writer) (err error) {
+
+    balanceCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
+    collection := balanceCommand.String("c", "EACH_COLLECTION", "collection name, or use \"ALL\" across collections, \"EACH_COLLECTION\" for each collection")
+    dc := balanceCommand.String("dataCenter", "", "only apply the balancing for this dataCenter")
+    applyBalancing := balanceCommand.Bool("f", false, "apply the balancing plan.")
+    if err = balanceCommand.Parse(args); err != nil {
+        return nil
+    }
+
+    var resp *master_pb.VolumeListResponse
+    ctx := context.Background()
+    err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error {
+        resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{})
+        return err
+    })
+    if err != nil {
+        return err
+    }
+
+    typeToNodes := collectVolumeServersByType(resp.TopologyInfo, *dc)
+    for _, volumeServers := range typeToNodes {
+
+        fmt.Printf("balanceEcVolumes servers %d\n", len(volumeServers))
+
+        if len(volumeServers) < 2 {
+            continue
+        }
+
+        if *collection == "EACH_COLLECTION" {
+            collections, err := ListCollectionNames(commandEnv)
+            if err != nil {
+                return err
+            }
+            fmt.Printf("balanceEcVolumes collections %+v\n", len(collections))
+            for _, c := range collections {
+                fmt.Printf("balanceEcVolumes collection %+v\n", c)
+                if err = balanceEcVolumes(commandEnv, c, *applyBalancing); err != nil {
+                    return err
+                }
+            }
+        } else if *collection == "ALL" {
+            if err = balanceEcVolumes(commandEnv, "ALL", *applyBalancing); err != nil {
+                return err
+            }
+        } else {
+            if err = balanceEcVolumes(commandEnv, *collection, *applyBalancing); err != nil {
+                return err
+            }
+        }
+ + } + return nil +} + +func balanceEcVolumes(commandEnv *commandEnv, collection string, applyBalancing bool) error { + + ctx := context.Background() + + fmt.Printf("balanceEcVolumes %s\n", collection) + + // collect all ec nodes + allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv) + if err != nil { + return err + } + if totalFreeEcSlots < 1 { + return fmt.Errorf("no free ec shard slots. only %d left", totalFreeEcSlots) + } + + // vid => []ecNode + vidLocations := make(map[needle.VolumeId][]*EcNode) + for _, ecNode := range allEcNodes { + for _, shardInfo := range ecNode.info.EcShardInfos { + vidLocations[needle.VolumeId(shardInfo.Id)] = append(vidLocations[needle.VolumeId(shardInfo.Id)], ecNode) + } + } + + for vid, locations := range vidLocations { + + // collect all ec nodes with at least one free slot + var possibleDestinationEcNodes []*EcNode + for _, ecNode := range allEcNodes { + if ecNode.freeEcSlot > 0 { + possibleDestinationEcNodes = append(possibleDestinationEcNodes, ecNode) + } + } + + // calculate average number of shards an ec node should have for one volume + averageShardsPerEcNode := int(math.Ceil(float64(erasure_coding.TotalShardsCount) / float64(len(possibleDestinationEcNodes)))) + + fmt.Printf("vid %d averageShardsPerEcNode %+v\n", vid, averageShardsPerEcNode) + + // check whether this volume has ecNodes that are over average + isOverLimit := false + for _, ecNode := range locations { + shardBits := findEcVolumeShards(ecNode, vid) + if shardBits.ShardIdCount() > averageShardsPerEcNode { + isOverLimit = true + fmt.Printf("vid %d %s has %d shards, isOverLimit %+v\n", vid, ecNode.info.Id, shardBits.ShardIdCount(), isOverLimit) + break + } + } + + if isOverLimit { + + if err := spreadShardsIntoMoreDataNodes(ctx, commandEnv, averageShardsPerEcNode, vid, locations, possibleDestinationEcNodes, applyBalancing); err != nil { + return err + } + + } + + } + + return nil +} + +func spreadShardsIntoMoreDataNodes(ctx context.Context, commandEnv *commandEnv, averageShardsPerEcNode int, vid needle.VolumeId, existingLocations, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { + + for _, ecNode := range existingLocations { + + shardBits := findEcVolumeShards(ecNode, vid) + overLimitCount := shardBits.ShardIdCount() - averageShardsPerEcNode + + for _, shardId := range shardBits.ShardIds() { + + if overLimitCount <= 0 { + break + } + + fmt.Printf("%s has %d overlimit, moving ec shard %d.%d\n", ecNode.info.Id, overLimitCount, vid, shardId) + + err := pickOneEcNodeAndMoveOneShard(ctx, commandEnv, averageShardsPerEcNode, ecNode, vid, shardId, possibleDestinationEcNodes, applyBalancing) + if err != nil { + return err + } + + overLimitCount-- + } + } + + return nil +} + +func pickOneEcNodeAndMoveOneShard(ctx context.Context, commandEnv *commandEnv, averageShardsPerEcNode int, existingLocation *EcNode, vid needle.VolumeId, shardId erasure_coding.ShardId, possibleDestinationEcNodes []*EcNode, applyBalancing bool) error { + + sortEcNodes(possibleDestinationEcNodes) + + for _, destEcNode := range possibleDestinationEcNodes { + if destEcNode.info.Id == existingLocation.info.Id { + continue + } + + if destEcNode.freeEcSlot <= 0 { + continue + } + if findEcVolumeShards(destEcNode, vid).ShardIdCount() >= averageShardsPerEcNode { + continue + } + + fmt.Printf("%s moves ec shard %d.%d to %s\n", existingLocation.info.Id, vid, shardId, destEcNode.info.Id) + + err := moveOneShardToEcNode(ctx, commandEnv, existingLocation, vid, shardId, destEcNode, applyBalancing) + if err 
!= nil { + return err + } + + destEcNode.freeEcSlot-- + return nil + } + + return nil +} + +func moveOneShardToEcNode(ctx context.Context, commandEnv *commandEnv, existingLocation *EcNode, vid needle.VolumeId, shardId erasure_coding.ShardId, destinationEcNode *EcNode, applyBalancing bool) error { + + fmt.Printf("moved ec shard %d.%d %s => %s\n", vid, shardId, existingLocation.info.Id, destinationEcNode.info.Id) + return nil +} + +func findEcVolumeShards(ecNode *EcNode, vid needle.VolumeId) erasure_coding.ShardBits { + + for _, shardInfo := range ecNode.info.EcShardInfos { + if needle.VolumeId(shardInfo.Id) == vid { + return erasure_coding.ShardBits(shardInfo.EcIndexBits) + } + } + + return 0 +} diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go index ac42b520d..817529478 100644 --- a/weed/shell/command_ec_encode.go +++ b/weed/shell/command_ec_encode.go @@ -97,40 +97,24 @@ func generateEcShards(ctx context.Context, grpcDialOption grpc.DialOption, volum func balanceEcShards(ctx context.Context, commandEnv *commandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) { - // list all possible locations - var resp *master_pb.VolumeListResponse - err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { - resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) - return err - }) + allEcNodes, totalFreeEcSlots, err := collectEcNodes(ctx, commandEnv) if err != nil { return err } - // find out all volume servers with one volume slot left. - var allDataNodes []*master_pb.DataNodeInfo - var totalFreeEcSlots uint32 - eachDataNode(resp.TopologyInfo, func(dn *master_pb.DataNodeInfo) { - if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 { - allDataNodes = append(allDataNodes, dn) - totalFreeEcSlots += freeEcSlots - } - }) if totalFreeEcSlots < erasure_coding.TotalShardsCount { return fmt.Errorf("not enough free ec shard slots. 
only %d left", totalFreeEcSlots) } - sort.Slice(allDataNodes, func(i, j int) bool { - return countFreeShardSlots(allDataNodes[j]) < countFreeShardSlots(allDataNodes[i]) - }) - if len(allDataNodes) > erasure_coding.TotalShardsCount { - allDataNodes = allDataNodes[:erasure_coding.TotalShardsCount] + allocatedDataNodes := allEcNodes + if len(allocatedDataNodes) > erasure_coding.TotalShardsCount { + allocatedDataNodes = allocatedDataNodes[:erasure_coding.TotalShardsCount] } // calculate how many shards to allocate for these servers - allocated := balancedEcDistribution(allDataNodes) + allocated := balancedEcDistribution(allocatedDataNodes) // ask the data nodes to copy from the source volume server - copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allDataNodes, allocated, volumeId, collection, existingLocations[0]) + copiedShardIds, err := parallelCopyEcShardsFromSource(ctx, commandEnv.option.GrpcDialOption, allocatedDataNodes, allocated, volumeId, collection, existingLocations[0]) if err != nil { return nil } @@ -154,7 +138,7 @@ func balanceEcShards(ctx context.Context, commandEnv *commandEnv, volumeId needl } func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServers []*master_pb.DataNodeInfo, allocated []uint32, + targetServers []*EcNode, allocated []int, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) { // parallelize @@ -167,7 +151,7 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia } wg.Add(1) - go func(server *master_pb.DataNodeInfo, startFromShardId uint32, shardCount uint32) { + go func(server *EcNode, startFromShardId uint32, shardCount int) { defer wg.Done() copiedShardIds, copyErr := oneServerCopyEcShardsFromSource(ctx, grpcDialOption, server, startFromShardId, shardCount, volumeId, collection, existingLocation) @@ -175,9 +159,10 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia err = copyErr } else { shardIdChan <- copiedShardIds + server.freeEcSlot -= len(copiedShardIds) } }(server, startFromShardId, allocated[i]) - startFromShardId += allocated[i] + startFromShardId += uint32(allocated[i]) } wg.Wait() close(shardIdChan) @@ -194,18 +179,18 @@ func parallelCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Dia } func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.DialOption, - targetServer *master_pb.DataNodeInfo, startFromShardId uint32, shardCount uint32, + targetServer *EcNode, startFromShardId uint32, shardCount int, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (copiedShardIds []uint32, err error) { var shardIdsToCopy []uint32 - for shardId := startFromShardId; shardId < startFromShardId+shardCount; shardId++ { - fmt.Printf("allocate %d.%d %s => %s\n", volumeId, shardId, existingLocation.Url, targetServer.Id) + for shardId := startFromShardId; shardId < startFromShardId+uint32(shardCount); shardId++ { + fmt.Printf("allocate %d.%d %s => %s\n", volumeId, shardId, existingLocation.Url, targetServer.info.Id) shardIdsToCopy = append(shardIdsToCopy, shardId) } - err = operation.WithVolumeServerClient(targetServer.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { + err = operation.WithVolumeServerClient(targetServer.info.Id, grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error { - if targetServer.Id != 
existingLocation.Url { + if targetServer.info.Id != existingLocation.Url { _, copyErr := volumeServerClient.VolumeEcShardsCopy(ctx, &volume_server_pb.VolumeEcShardsCopyRequest{ VolumeId: uint32(volumeId), @@ -227,7 +212,7 @@ func oneServerCopyEcShardsFromSource(ctx context.Context, grpcDialOption grpc.Di return mountErr } - if targetServer.Id != existingLocation.Url { + if targetServer.info.Id != existingLocation.Url { copiedShardIds = shardIdsToCopy glog.V(0).Infof("%s ec volume %d deletes shards %+v", existingLocation.Url, volumeId, copiedShardIds) } @@ -258,11 +243,11 @@ func sourceServerDeleteEcShards(ctx context.Context, grpcDialOption grpc.DialOpt } -func balancedEcDistribution(servers []*master_pb.DataNodeInfo) (allocated []uint32) { - freeSlots := make([]uint32, len(servers)) - allocated = make([]uint32, len(servers)) +func balancedEcDistribution(servers []*EcNode) (allocated []int) { + freeSlots := make([]int, len(servers)) + allocated = make([]int, len(servers)) for i, server := range servers { - freeSlots[i] = countFreeShardSlots(server) + freeSlots[i] = countFreeShardSlots(server.info) } allocatedCount := 0 for allocatedCount < erasure_coding.TotalShardsCount { @@ -290,14 +275,53 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(*master_pb.DataNodeInfo) } } -func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count uint32) { +func countShards(ecShardInfos []*master_pb.VolumeEcShardInformationMessage) (count int) { for _, ecShardInfo := range ecShardInfos { shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits) - count += uint32(shardBits.ShardIdCount()) + count += shardBits.ShardIdCount() } return } -func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count uint32) { - return uint32(dn.FreeVolumeCount)*10 - countShards(dn.EcShardInfos) +func countFreeShardSlots(dn *master_pb.DataNodeInfo) (count int) { + return int(dn.FreeVolumeCount)*10 - countShards(dn.EcShardInfos) +} + +type EcNode struct { + info *master_pb.DataNodeInfo + freeEcSlot int +} + +func sortEcNodes(ecNodes []*EcNode) { + sort.Slice(ecNodes, func(i, j int) bool { + return ecNodes[i].freeEcSlot > ecNodes[j].freeEcSlot + }) +} + +func collectEcNodes(ctx context.Context, commandEnv *commandEnv) (ecNodes []*EcNode, totalFreeEcSlots int, err error) { + + // list all possible locations + var resp *master_pb.VolumeListResponse + err = commandEnv.masterClient.WithClient(ctx, func(client master_pb.SeaweedClient) error { + resp, err = client.VolumeList(ctx, &master_pb.VolumeListRequest{}) + return err + }) + if err != nil { + return nil, 0, err + } + + // find out all volume servers with one slot left. + eachDataNode(resp.TopologyInfo, func(dn *master_pb.DataNodeInfo) { + if freeEcSlots := countFreeShardSlots(dn); freeEcSlots > 0 { + ecNodes = append(ecNodes, &EcNode{ + info: dn, + freeEcSlot: int(freeEcSlots), + }) + totalFreeEcSlots += freeEcSlots + } + }) + + sortEcNodes(ecNodes) + + return }
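
Example usage (a sketch, not part of the patch): the flag names -c, -dataCenter and -f are taken from the code above; the collection name "my_collection" and the data center name "dc1" are placeholders. Without -f the command only prints the plan, since moveOneShardToEcNode in this change just logs the intended shard move.

    > ec.balance                                        # plan only, balancing each collection in turn
    > ec.balance -c ALL                                 # plan only, balancing across collections in one pass
    > ec.balance -c my_collection -dataCenter dc1 -f    # apply the plan for one collection, restricted to one data center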