
Revert "Revert "Merge branch 'master' of https://github.com/seaweedfs/seaweedfs""

This reverts commit 8cb42c39
chrislu committed 2023-09-25 09:35:16 -07:00
parent 3d07895518
commit 645ae8c57b
23 changed files with 84 additions and 70 deletions
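All 23 files change the same way: the slices.SortFunc comparator from golang.org/x/exp/slices now returns a three-way int instead of a less-than bool, matching the signature adopted by Go 1.21's standard slices package. A minimal before/after sketch, written against the standard library (same contract; the sample data is illustrative):

package main

import (
    "fmt"
    "slices" // Go 1.21+; golang.org/x/exp/slices uses the same signature
    "strings"
)

func main() {
    names := []string{"volume", "filer", "master"}

    // Old API (removed): the comparator returned "a sorts before b".
    //   slices.SortFunc(names, func(a, b string) bool { return a < b })

    // New API: negative, zero, or positive, like strings.Compare.
    slices.SortFunc(names, func(a, b string) int {
        return strings.Compare(a, b)
    })

    fmt.Println(names) // [filer master volume]
}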


@@ -288,8 +288,8 @@ func checkPeers(masterIp string, masterPort int, masterGrpcPort int, peers strin
 }
 
 func isTheFirstOne(self pb.ServerAddress, peers []pb.ServerAddress) bool {
-    slices.SortFunc(peers, func(a, b pb.ServerAddress) bool {
-        return strings.Compare(string(a), string(b)) < 0
+    slices.SortFunc(peers, func(a, b pb.ServerAddress) int {
+        return strings.Compare(string(a), string(b))
     })
     if len(peers) <= 0 {
         return true


@@ -73,11 +73,11 @@ func TestCompactFileChunksRealCase(t *testing.T) {
 }
 
 func printChunks(name string, chunks []*filer_pb.FileChunk) {
-    slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) bool {
+    slices.SortFunc(chunks, func(a, b *filer_pb.FileChunk) int {
         if a.Offset == b.Offset {
-            return a.ModifiedTsNs < b.ModifiedTsNs
+            return int(a.ModifiedTsNs - b.ModifiedTsNs)
         }
-        return a.Offset < b.Offset
+        return int(a.Offset - b.Offset)
     })
     for _, chunk := range chunks {
         glog.V(0).Infof("%s chunk %s [%10d,%10d)", name, chunk.GetFileIdString(), chunk.Offset, chunk.Offset+int64(chunk.Size))
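Many of the rewritten comparators use the subtraction idiom, for example int(a.Offset - b.Offset) above. For 64-bit signed fields this gives the right sign whenever the real difference fits in an int64, but it can wrap for extreme gaps. The standard cmp.Compare (Go 1.21+) avoids that entirely; a small sketch of the failure mode, not code from this commit:

package main

import (
    "cmp"
    "fmt"
    "math"
)

func main() {
    a, b := int64(math.MinInt64), int64(1)

    // Subtraction idiom: a - b wraps around for extreme gaps,
    // so the "smaller" value compares as greater.
    fmt.Println(int(a-b) > 0) // true on 64-bit platforms: wrong sign

    // cmp.Compare reports the ordering exactly for any ordered type.
    fmt.Println(cmp.Compare(a, b)) // -1
}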


@@ -30,14 +30,20 @@ func readResolvedChunks(chunks []*filer_pb.FileChunk, startOffset int64, stopOff
             isStart: false,
         })
     }
-    slices.SortFunc(points, func(a, b *Point) bool {
+    slices.SortFunc(points, func(a, b *Point) int {
         if a.x != b.x {
-            return a.x < b.x
+            return int(a.x - b.x)
         }
         if a.ts != b.ts {
-            return a.ts < b.ts
+            return int(a.ts - b.ts)
         }
-        return !a.isStart
+        if a.isStart {
+            return -1
+        }
+        if b.isStart {
+            return 1
+        }
+        return 0
     })
 
     var prevX int64
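The boolean tie-break return !a.isStart has no one-line int equivalent, hence the expanded cases above. One subtlety: if two points are both starts, the new comparator returns -1 for either argument order, where a strict three-way comparator would return 0 for equal keys. A symmetric sketch (with a hypothetical point type standing in for the diff's Point):

package main

import (
    "fmt"
    "slices"
)

// point is a hypothetical stand-in mirroring the fields used in the hunk above.
type point struct {
    x, ts   int64
    isStart bool
}

func main() {
    pts := []point{{x: 5, ts: 1, isStart: false}, {x: 5, ts: 1, isStart: true}}
    slices.SortFunc(pts, func(a, b point) int {
        if a.x != b.x {
            return int(a.x - b.x)
        }
        if a.ts != b.ts {
            return int(a.ts - b.ts)
        }
        // Symmetric tie-break: equal flags compare equal, so
        // cmp(a, b) == -cmp(b, a) holds even for two start points.
        if a.isStart == b.isStart {
            return 0
        }
        if a.isStart {
            return -1
        }
        return 1
    })
    fmt.Println(pts) // [{5 1 true} {5 1 false}]: the start point sorts first
}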


@@ -164,8 +164,8 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
     }
 
     // sort
-    slices.SortFunc(members, func(a, b string) bool {
-        return strings.Compare(a, b) < 0
+    slices.SortFunc(members, func(a, b string) int {
+        return strings.Compare(a, b)
     })
 
     // limit


@@ -45,11 +45,11 @@ func isSameChunks(a, b []*filer_pb.FileChunk) bool {
     if len(a) != len(b) {
         return false
     }
-    slices.SortFunc(a, func(i, j *filer_pb.FileChunk) bool {
-        return strings.Compare(i.ETag, j.ETag) < 0
+    slices.SortFunc(a, func(i, j *filer_pb.FileChunk) int {
+        return strings.Compare(i.ETag, j.ETag)
     })
-    slices.SortFunc(b, func(i, j *filer_pb.FileChunk) bool {
-        return strings.Compare(i.ETag, j.ETag) < 0
+    slices.SortFunc(b, func(i, j *filer_pb.FileChunk) int {
+        return strings.Compare(i.ETag, j.ETag)
     })
     for i := 0; i < len(a); i++ {
         if a[i].ETag != b[i].ETag {


@@ -72,8 +72,8 @@ func (s3a *S3ApiServer) completeMultipartUpload(input *s3.CompleteMultipartUploa
     glog.V(2).Infof("completeMultipartUpload input %v", input)
 
     completedParts := parts.Parts
-    slices.SortFunc(completedParts, func(a, b CompletedPart) bool {
-        return a.PartNumber < b.PartNumber
+    slices.SortFunc(completedParts, func(a, b CompletedPart) int {
+        return a.PartNumber - b.PartNumber
     })
 
     uploadDirectory := s3a.genUploadsFolder(*input.Bucket) + "/" + *input.UploadId


@@ -334,8 +334,8 @@ func (s3a *S3ApiServer) doDeleteEmptyDirectories(client filer_pb.SeaweedFilerCli
     for dir := range directoriesWithDeletion {
         allDirs = append(allDirs, dir)
     }
-    slices.SortFunc(allDirs, func(a, b string) bool {
-        return len(a) > len(b)
+    slices.SortFunc(allDirs, func(a, b string) int {
+        return len(b) - len(a)
     })
     newDirectoriesWithDeletion = make(map[string]int)
     for _, dir := range allDirs {


@@ -135,8 +135,8 @@ func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Reque
         fs.filer.DeleteChunks(fileChunks)
         return nil, md5Hash, 0, uploadErr, nil
     }
-    slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) bool {
-        return a.Offset < b.Offset
+    slices.SortFunc(fileChunks, func(a, b *filer_pb.FileChunk) int {
+        return int(a.Offset - b.Offset)
     })
     return fileChunks, md5Hash, chunkOffset, nil, smallContent
 }


@@ -411,8 +411,8 @@ func doBalanceEcRack(commandEnv *CommandEnv, ecRack *EcRack, applyBalancing bool
     hasMove := true
     for hasMove {
         hasMove = false
-        slices.SortFunc(rackEcNodes, func(a, b *EcNode) bool {
-            return a.freeEcSlot > b.freeEcSlot
+        slices.SortFunc(rackEcNodes, func(a, b *EcNode) int {
+            return b.freeEcSlot - a.freeEcSlot
         })
         emptyNode, fullNode := rackEcNodes[0], rackEcNodes[len(rackEcNodes)-1]
         emptyNodeShardCount, fullNodeShardCount := ecNodeIdToShardCount[emptyNode.info.Id], ecNodeIdToShardCount[fullNode.info.Id]
@@ -492,8 +492,8 @@ func pickNEcShardsToMoveFrom(ecNodes []*EcNode, vid needle.VolumeId, n int) map[
             })
         }
     }
-    slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) bool {
-        return a.shardCount > b.shardCount
+    slices.SortFunc(candidateEcNodes, func(a, b *CandidateEcNode) int {
+        return b.shardCount - a.shardCount
     })
     for i := 0; i < n; i++ {
         selectedEcNodeIndex := -1


@@ -119,14 +119,14 @@ func eachDataNode(topo *master_pb.TopologyInfo, fn func(dc string, rack RackId,
 }
 
 func sortEcNodesByFreeslotsDescending(ecNodes []*EcNode) {
-    slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
-        return a.freeEcSlot > b.freeEcSlot
+    slices.SortFunc(ecNodes, func(a, b *EcNode) int {
+        return b.freeEcSlot - a.freeEcSlot
     })
 }
 
 func sortEcNodesByFreeslotsAscending(ecNodes []*EcNode) {
-    slices.SortFunc(ecNodes, func(a, b *EcNode) bool {
-        return a.freeEcSlot < b.freeEcSlot
+    slices.SortFunc(ecNodes, func(a, b *EcNode) int {
+        return a.freeEcSlot - b.freeEcSlot
     })
 }


@@ -243,8 +243,8 @@ func (n *Node) selectVolumes(fn func(v *master_pb.VolumeInformationMessage) bool
 }
 
 func sortWritableVolumes(volumes []*master_pb.VolumeInformationMessage) {
-    slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) bool {
-        return a.Size < b.Size
+    slices.SortFunc(volumes, func(a, b *master_pb.VolumeInformationMessage) int {
+        return int(a.Size - b.Size)
     })
 }
@@ -269,8 +269,8 @@ func balanceSelectedVolume(commandEnv *CommandEnv, diskType types.DiskType, volu
     for hasMoved {
         hasMoved = false
-        slices.SortFunc(nodesWithCapacity, func(a, b *Node) bool {
-            return a.localVolumeRatio(capacityFunc) < b.localVolumeRatio(capacityFunc)
+        slices.SortFunc(nodesWithCapacity, func(a, b *Node) int {
+            return int(a.localVolumeRatio(capacityFunc) - b.localVolumeRatio(capacityFunc))
         })
         if len(nodesWithCapacity) == 0 {
             fmt.Printf("no volume server found with capacity for %s", diskType.ReadableString())
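A related caveat applies to the hunk above: localVolumeRatio appears to return a fractional float64 ratio, and converting the difference to int truncates toward zero, so two nodes whose ratios differ by less than 1.0 compare as equal. Comparing the floats with cmp.Compare would preserve the ordering; a tiny sketch (the 0.42/0.58 values are made up):

package main

import (
    "cmp"
    "fmt"
)

func main() {
    a, b := 0.42, 0.58 // hypothetical volume ratios, both below 1.0

    // int(float64) truncates toward zero: a gap of -0.16 becomes 0,
    // so a subtraction-based comparator sees the nodes as equal.
    fmt.Println(int(a - b)) // 0

    // cmp.Compare keeps the ordering.
    fmt.Println(cmp.Compare(a, b)) // -1
}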


@@ -80,8 +80,8 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
         if *volumeId > 0 && replicas[0].info.Id != uint32(*volumeId) {
             continue
         }
-        slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
-            return fileCount(a) > fileCount(b)
+        slices.SortFunc(replicas, func(a, b *VolumeReplica) int {
+            return int(fileCount(b) - fileCount(a))
         })
         for len(replicas) >= 2 {
             a, b := replicas[0], replicas[1]


@@ -328,8 +328,8 @@ func (c *commandVolumeFixReplication) fixOneUnderReplicatedVolume(commandEnv *Co
 }
 
 func keepDataNodesSorted(dataNodes []location, diskType types.DiskType) {
     fn := capacityByFreeVolumeCount(diskType)
-    slices.SortFunc(dataNodes, func(a, b location) bool {
-        return fn(a.dataNode) > fn(b.dataNode)
+    slices.SortFunc(dataNodes, func(a, b location) int {
+        return int(fn(b.dataNode) - fn(a.dataNode))
     })
 }
@@ -514,17 +514,17 @@ func countReplicas(replicas []*VolumeReplica) (diffDc, diffRack, diffNode map[st
 }
 
 func pickOneReplicaToDelete(replicas []*VolumeReplica, replicaPlacement *super_block.ReplicaPlacement) *VolumeReplica {
-    slices.SortFunc(replicas, func(a, b *VolumeReplica) bool {
+    slices.SortFunc(replicas, func(a, b *VolumeReplica) int {
         if a.info.Size != b.info.Size {
-            return a.info.Size < b.info.Size
+            return int(a.info.Size - b.info.Size)
         }
         if a.info.ModifiedAtSecond != b.info.ModifiedAtSecond {
-            return a.info.ModifiedAtSecond < b.info.ModifiedAtSecond
+            return int(a.info.ModifiedAtSecond - b.info.ModifiedAtSecond)
         }
         if a.info.CompactRevision != b.info.CompactRevision {
-            return a.info.CompactRevision < b.info.CompactRevision
+            return int(a.info.CompactRevision - b.info.CompactRevision)
         }
-        return false
+        return 0
     })
     return replicas[0]


@@ -8,6 +8,7 @@ import (
     "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
     "golang.org/x/exp/slices"
     "path/filepath"
+    "strings"
 
     "io"
 )
@@ -81,8 +82,8 @@ func diskInfoToString(diskInfo *master_pb.DiskInfo) string {
 func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.TopologyInfo, volumeSizeLimitMb uint64, verbosityLevel int) statistics {
     output(verbosityLevel >= 0, writer, "Topology volumeSizeLimit:%d MB%s\n", volumeSizeLimitMb, diskInfosToString(t.DiskInfos))
-    slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) bool {
-        return a.Id < b.Id
+    slices.SortFunc(t.DataCenterInfos, func(a, b *master_pb.DataCenterInfo) int {
+        return strings.Compare(a.Id, b.Id)
     })
     var s statistics
     for _, dc := range t.DataCenterInfos {
@@ -98,8 +99,8 @@ func (c *commandVolumeList) writeTopologyInfo(writer io.Writer, t *master_pb.Top
 func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.DataCenterInfo, verbosityLevel int) statistics {
     output(verbosityLevel >= 1, writer, " DataCenter %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
     var s statistics
-    slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) bool {
-        return a.Id < b.Id
+    slices.SortFunc(t.RackInfos, func(a, b *master_pb.RackInfo) int {
+        return strings.Compare(a.Id, b.Id)
     })
     for _, r := range t.RackInfos {
         if *c.rack != "" && *c.rack != r.Id {
@@ -114,8 +115,8 @@ func (c *commandVolumeList) writeDataCenterInfo(writer io.Writer, t *master_pb.D
 func (c *commandVolumeList) writeRackInfo(writer io.Writer, t *master_pb.RackInfo, verbosityLevel int) statistics {
     output(verbosityLevel >= 2, writer, " Rack %s%s\n", t.Id, diskInfosToString(t.DiskInfos))
     var s statistics
-    slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) bool {
-        return a.Id < b.Id
+    slices.SortFunc(t.DataNodeInfos, func(a, b *master_pb.DataNodeInfo) int {
+        return strings.Compare(a.Id, b.Id)
     })
     for _, dn := range t.DataNodeInfos {
         if *c.dataNode != "" && *c.dataNode != dn.Id {
@@ -159,8 +160,8 @@ func (c *commandVolumeList) writeDiskInfo(writer io.Writer, t *master_pb.DiskInf
         diskType = "hdd"
     }
     output(verbosityLevel >= 4, writer, " Disk %s(%s)\n", diskType, diskInfoToString(t))
-    slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) bool {
-        return a.Id < b.Id
+    slices.SortFunc(t.VolumeInfos, func(a, b *master_pb.VolumeInformationMessage) int {
+        return int(a.Id - b.Id)
     })
     for _, vi := range t.VolumeInfos {
         if c.isNotMatchDiskInfo(vi.ReadOnly, vi.Collection, vi.Id) {


@@ -179,8 +179,8 @@ func (c *commandVolumeServerEvacuate) evacuateEcVolumes(commandEnv *CommandEnv,
 func (c *commandVolumeServerEvacuate) moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEcShardInformationMessage, thisNode *EcNode, otherNodes []*EcNode, applyChange bool) (hasMoved bool, err error) {
     for _, shardId := range erasure_coding.ShardBits(ecShardInfo.EcIndexBits).ShardIds() {
-        slices.SortFunc(otherNodes, func(a, b *EcNode) bool {
-            return a.localShardIdCount(ecShardInfo.Id) < b.localShardIdCount(ecShardInfo.Id)
+        slices.SortFunc(otherNodes, func(a, b *EcNode) int {
+            return a.localShardIdCount(ecShardInfo.Id) - b.localShardIdCount(ecShardInfo.Id)
         })
         for i := 0; i < len(otherNodes); i++ {
             emptyNode := otherNodes[i]
@@ -214,8 +214,8 @@ func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][
         })
     }
     // most empty one is in the front
-    slices.SortFunc(otherNodes, func(a, b *Node) bool {
-        return a.localVolumeRatio(maxVolumeCountFn) < b.localVolumeRatio(maxVolumeCountFn)
+    slices.SortFunc(otherNodes, func(a, b *Node) int {
+        return int(a.localVolumeRatio(maxVolumeCountFn) - b.localVolumeRatio(maxVolumeCountFn))
     })
     for i := 0; i < len(otherNodes); i++ {
         emptyNode := otherNodes[i]


@@ -26,8 +26,8 @@ var (
 )
 
 func RunShell(options ShellOptions) {
-    slices.SortFunc(Commands, func(a, b command) bool {
-        return strings.Compare(a.Name(), b.Name()) < 0
+    slices.SortFunc(Commands, func(a, b command) int {
+        return strings.Compare(a.Name(), b.Name())
     })
     line = liner.NewLiner()
     defer line.Close()


@@ -7,6 +7,7 @@ import (
     "path"
     "regexp"
     "strconv"
+    "strings"
 
     "github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
     "github.com/seaweedfs/seaweedfs/weed/storage/needle"
@@ -144,8 +145,8 @@ func (l *DiskLocation) loadAllEcShards() (err error) {
         }
         dirEntries = append(dirEntries, indexDirEntries...)
     }
-    slices.SortFunc(dirEntries, func(a, b os.DirEntry) bool {
-        return a.Name() < b.Name()
+    slices.SortFunc(dirEntries, func(a, b os.DirEntry) int {
+        return strings.Compare(a.Name(), b.Name())
     })
     var sameVolumeShards []string
     var prevVolumeId needle.VolumeId


@@ -84,8 +84,11 @@ func (ev *EcVolume) AddEcVolumeShard(ecVolumeShard *EcVolumeShard) bool {
         }
     }
     ev.Shards = append(ev.Shards, ecVolumeShard)
-    slices.SortFunc(ev.Shards, func(a, b *EcVolumeShard) bool {
-        return a.VolumeId < b.VolumeId || a.VolumeId == b.VolumeId && a.ShardId < b.ShardId
+    slices.SortFunc(ev.Shards, func(a, b *EcVolumeShard) int {
+        if a.VolumeId != b.VolumeId {
+            return int(a.VolumeId - b.VolumeId)
+        }
+        return int(a.ShardId - b.ShardId)
     })
     return true
 }
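Note that VolumeId and ShardId are small unsigned integer types in SeaweedFS, and unsigned subtraction wraps to a large positive value, so on 64-bit platforms int(a.VolumeId - b.VolumeId) is non-negative for any unequal pair regardless of argument order. For multi-key comparators, cmp.Compare chained with cmp.Or (Go 1.22+) sidesteps this; a hypothetical equivalent with stand-in types:

package main

import (
    "cmp"
    "fmt"
    "slices"
)

// shard is a hypothetical stand-in for *EcVolumeShard, keeping only the sort keys.
type shard struct {
    volumeID uint32
    shardID  uint8
}

func main() {
    shards := []shard{{2, 1}, {1, 3}, {1, 0}}

    // Sort by volumeID, then shardID. cmp.Or returns the first non-zero
    // comparison; cmp.Compare never wraps the way unsigned subtraction does.
    slices.SortFunc(shards, func(a, b shard) int {
        return cmp.Or(
            cmp.Compare(a.volumeID, b.volumeID),
            cmp.Compare(a.shardID, b.shardID),
        )
    })

    fmt.Println(shards) // [{1 0} {1 3} {2 1}]
}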


@@ -400,8 +400,8 @@ func (s *Store) EcVolumes() (ecVolumes []*erasure_coding.EcVolume) {
         }
         location.ecVolumesLock.RUnlock()
     }
-    slices.SortFunc(ecVolumes, func(a, b *erasure_coding.EcVolume) bool {
-        return a.VolumeId > b.VolumeId
+    slices.SortFunc(ecVolumes, func(a, b *erasure_coding.EcVolume) int {
+        return int(b.VolumeId - a.VolumeId)
     })
     return ecVolumes
 }


@@ -3,6 +3,7 @@ package topology
 import (
     "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
     "golang.org/x/exp/slices"
+    "strings"
 )
 
 type DataCenter struct {
@@ -46,8 +47,8 @@ func (dc *DataCenter) ToInfo() (info DataCenterInfo) {
         racks = append(racks, rack.ToInfo())
     }
-    slices.SortFunc(racks, func(a, b RackInfo) bool {
-        return a.Id < b.Id
+    slices.SortFunc(racks, func(a, b RackInfo) int {
+        return strings.Compare(string(a.Id), string(b.Id))
     })
     info.Racks = racks
     return


@@ -5,6 +5,7 @@ import (
     "github.com/seaweedfs/seaweedfs/weed/storage/types"
     "github.com/seaweedfs/seaweedfs/weed/util"
     "golang.org/x/exp/slices"
+    "strings"
     "time"
 )
@@ -69,8 +70,8 @@ func (r *Rack) ToInfo() (info RackInfo) {
         dns = append(dns, dn.ToInfo())
     }
-    slices.SortFunc(dns, func(a, b DataNodeInfo) bool {
-        return a.Url < b.Url
+    slices.SortFunc(dns, func(a, b DataNodeInfo) int {
+        return strings.Compare(a.Url, b.Url)
     })
     info.DataNodes = dns


@@ -3,6 +3,7 @@ package topology
 import (
     "github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
     "golang.org/x/exp/slices"
+    "strings"
 )
 
 type TopologyInfo struct {
@@ -21,8 +22,8 @@ func (t *Topology) ToInfo() (info TopologyInfo) {
         dcs = append(dcs, dc.ToInfo())
     }
-    slices.SortFunc(dcs, func(a, b DataCenterInfo) bool {
-        return a.Id < b.Id
+    slices.SortFunc(dcs, func(a, b DataCenterInfo) int {
+        return strings.Compare(string(a.Id), string(b.Id))
     })
     info.DataCenters = dcs


@@ -32,8 +32,8 @@ func NewOnDiskCacheLayer(dir, namePrefix string, diskSize int64, segmentCount in
     }
 
     // keep newest cache to the front
-    slices.SortFunc(c.diskCaches, func(a, b *ChunkCacheVolume) bool {
-        return a.lastModTime.After(b.lastModTime)
+    slices.SortFunc(c.diskCaches, func(a, b *ChunkCacheVolume) int {
+        return b.lastModTime.Compare(a.lastModTime)
     })
     return c
 }
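time.Time.Compare (Go 1.20+) returns -1, 0, or +1, and calling it as b.Compare(a) reverses the order, which is what keeps the newest cache at the front here. A short sketch of the pattern:

package main

import (
    "fmt"
    "slices"
    "time"
)

func main() {
    ts := []time.Time{
        time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC),
        time.Date(2023, 9, 25, 0, 0, 0, 0, time.UTC),
    }

    // Newest first: compare b against a, mirroring the hunk above.
    slices.SortFunc(ts, func(a, b time.Time) int {
        return b.Compare(a)
    })

    fmt.Println(ts[0].Format(time.DateOnly)) // 2023-09-25
}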