Mirror of https://github.com/chrislusf/seaweedfs (synced 2025-08-16 17:12:46 +02:00)
* initial design
* added simulation as tests
* reorganized the codebase to move the simulation framework and tests into their own dedicated package
* integration test: EC worker task
* remove "enhanced" reference
* start master, volume servers, filer. Current status:
  * ✅ Master: healthy and running (port 9333)
  * ✅ Filer: healthy and running (port 8888)
  * ✅ Volume servers: all 6 servers running (ports 8080-8085)
  * 🔄 Admin/Workers: will start when dependencies are ready
* generate write load
* tasks are assigned
* admin starts with gRPC port; worker has its own working directory
* Update .gitignore
* working worker and admin; task detection is not working yet
* compiles, detection uses volumeSizeLimitMB from master
* compiles
* worker retries connecting to admin
* build and restart
* rendering pending tasks
* skip task ID column
* sticky worker id
* test canScheduleTaskNow
* worker reconnects to admin
* clean up logs
* worker registers itself first
* worker can run EC work and report status, but: 1. one volume should not be repeatedly worked on; 2. EC shards need to be distributed and source data should be deleted
* move EC task logic
* listing EC shards
* local copy, EC; still needs to be distributed
* EC is mostly working now
* distribution of EC shards needs improvement
* need configuration to enable EC
* show EC volumes
* interval field UI component
* rename
* integration test with vacuuming
* garbage percentage threshold
* fix warning
* display EC shard sizes
* fix EC volumes list
* Update ui.go
* show default values
* ensure correct default value
* MaintenanceConfig uses ConfigField
* use schema-defined defaults
* config
* reduce duplication
* refactor to use BaseUIProvider
* each task registers its schema
* checkECEncodingCandidate uses ecDetector
* use vacuumDetector
* use volumeSizeLimitMB
* remove remove
* remove unused
* refactor
* use new framework
* remove v2 reference
* refactor
* left menu can scroll now
* the maintenance manager was not being initialized when no data directory was configured for persistent storage
* saving config
* Update task_config_schema_templ.go
* enable/disable tasks
* protobuf-encoded task configurations
* fix system settings
* use UI component
* remove logs
* interface{} reduction
* reduce interface{}
* reduce interface{}
* avoid from/to map
* reduce interface{}
* refactor
* keep it DRY
* added logging
* debug messages
* debug level
* debug
* show the log caller line
* use configured task policy
* log level
* handle admin heartbeat response
* Update worker.go
* fix EC rack and dc count
* report task status to admin server
* fix task logging, simplify interface checking, use erasure_coding constants
* factor in empty volume servers during task planning
* volume.list adds disk id
* track disk id also
* fix locking of scheduled and manual scanning
* add active topology
* simplify task detector
* EC task completed, but shards are not showing up
* implement EC in ec_typed.go
* adjust log level
* dedup
* implement EC copying of shards and only ecx files
* use disk id when distributing EC shards:
  * 🎯 Planning: ActiveTopology creates a DestinationPlan with a specific TargetDisk
  * 📦 Task creation: maintenance_integration.go creates an ECDestination with DiskId
  * 🚀 Task execution: the EC task passes DiskId in VolumeEcShardsCopyRequest
  * 💾 Volume server: receives disk_id and stores shards on that specific disk (vs.store.Locations[req.DiskId])
  * 📂 File system: EC shards and metadata land in the exact disk directory planned
* delete original volume from all locations
* clean up existing shard locations
* local encoding and distributing
* Update docker/admin_integration/EC-TESTING-README.md (Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>)
* check volume id range
* simplify
* fix tests
* fix types
* clean up logs and tests

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
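The disk-targeted placement steps listed above can be illustrated with a small, self-contained sketch. Everything in it is hypothetical stand-in code, not the SeaweedFS implementation: the type and function names (destinationPlan, ecShardsCopyRequest, placeShards) are invented for illustration, and the only detail taken from the change itself is that the volume server uses the planned DiskId as an index into its list of disk locations (vs.store.Locations[req.DiskId]) to decide where the EC shards land.

package main

import (
	"fmt"
	"path/filepath"
)

// Hypothetical stand-ins for the pieces named in the commit log; the real types
// (ActiveTopology, DestinationPlan, VolumeEcShardsCopyRequest) live in SeaweedFS.
type destinationPlan struct {
	TargetNode string // volume server chosen by the planner
	TargetDisk uint32 // disk index on that server
}

type ecShardsCopyRequest struct {
	VolumeId uint32
	DiskId   uint32 // forwarded from the plan so the receiving server can pin the disk
}

// placeShards mimics the volume-server side: it indexes the server's disk
// directories by the planned DiskId and returns the directory where the EC
// shards and the .ecx index would be written.
func placeShards(diskDirs []string, req ecShardsCopyRequest) (string, error) {
	if int(req.DiskId) >= len(diskDirs) {
		return "", fmt.Errorf("disk id %d out of range: server has %d disks", req.DiskId, len(diskDirs))
	}
	return filepath.Join(diskDirs[req.DiskId], fmt.Sprintf("ec_volume_%d", req.VolumeId)), nil
}

func main() {
	// Planning: the topology picks a specific target disk for the shards.
	plan := destinationPlan{TargetNode: "volume-server-3:8080", TargetDisk: 1}

	// Task execution: the EC task carries the planned disk id in its copy request.
	req := ecShardsCopyRequest{VolumeId: 42, DiskId: plan.TargetDisk}

	// Volume server: the disk id selects one of its configured disk directories.
	dir, err := placeShards([]string{"/data/disk0", "/data/disk1"}, req)
	if err != nil {
		panic(err)
	}
	fmt.Println("EC shards for volume", req.VolumeId, "would be written under", dir)
}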
121 lines · 3.4 KiB · Go
package storage

import (
	"fmt"
	"sort"

	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/seaweedfs/seaweedfs/weed/storage/needle"
	"github.com/seaweedfs/seaweedfs/weed/storage/super_block"
)

// VolumeInfo holds the metadata of a single volume as exchanged with the
// master: identity, size, replica placement, TTL, disk location, file and
// deletion counters, and any remote storage mapping.
type VolumeInfo struct {
	Id                needle.VolumeId
	Size              uint64
	ReplicaPlacement  *super_block.ReplicaPlacement
	Ttl               *needle.TTL
	DiskType          string
	DiskId            uint32
	Collection        string
	Version           needle.Version
	FileCount         int
	DeleteCount       int
	DeletedByteCount  uint64
	ReadOnly          bool
	CompactRevision   uint32
	ModifiedAtSecond  int64
	RemoteStorageName string
	RemoteStorageKey  string
}

// NewVolumeInfo builds a VolumeInfo from a full volume heartbeat message,
// decoding the replica placement byte and the packed TTL.
func NewVolumeInfo(m *master_pb.VolumeInformationMessage) (vi VolumeInfo, err error) {
	vi = VolumeInfo{
		Id:                needle.VolumeId(m.Id),
		Size:              m.Size,
		Collection:        m.Collection,
		FileCount:         int(m.FileCount),
		DeleteCount:       int(m.DeleteCount),
		DeletedByteCount:  m.DeletedByteCount,
		ReadOnly:          m.ReadOnly,
		Version:           needle.Version(m.Version),
		CompactRevision:   m.CompactRevision,
		ModifiedAtSecond:  m.ModifiedAtSecond,
		RemoteStorageName: m.RemoteStorageName,
		RemoteStorageKey:  m.RemoteStorageKey,
		DiskType:          m.DiskType,
		DiskId:            m.DiskId,
	}
	rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
	if e != nil {
		return vi, e
	}
	vi.ReplicaPlacement = rp
	vi.Ttl = needle.LoadTTLFromUint32(m.Ttl)
	return vi, nil
}

// NewVolumeInfoFromShort builds a VolumeInfo from the short volume message,
// which carries only id, collection, version, placement, TTL, and disk type.
func NewVolumeInfoFromShort(m *master_pb.VolumeShortInformationMessage) (vi VolumeInfo, err error) {
	vi = VolumeInfo{
		Id:         needle.VolumeId(m.Id),
		Collection: m.Collection,
		Version:    needle.Version(m.Version),
	}
	rp, e := super_block.NewReplicaPlacementFromByte(byte(m.ReplicaPlacement))
	if e != nil {
		return vi, e
	}
	vi.ReplicaPlacement = rp
	vi.Ttl = needle.LoadTTLFromUint32(m.Ttl)
	vi.DiskType = m.DiskType
	return vi, nil
}

// IsRemote reports whether the volume content has been tiered to remote storage.
func (vi VolumeInfo) IsRemote() bool {
	return vi.RemoteStorageName != ""
}

func (vi VolumeInfo) String() string {
	return fmt.Sprintf("Id:%d, Size:%d, ReplicaPlacement:%s, Collection:%s, Version:%v, FileCount:%d, DeleteCount:%d, DeletedByteCount:%d, ReadOnly:%v",
		vi.Id, vi.Size, vi.ReplicaPlacement, vi.Collection, vi.Version, vi.FileCount, vi.DeleteCount, vi.DeletedByteCount, vi.ReadOnly)
}

// ToVolumeInformationMessage converts the VolumeInfo back into its protobuf
// heartbeat form, re-encoding the replica placement and TTL.
func (vi VolumeInfo) ToVolumeInformationMessage() *master_pb.VolumeInformationMessage {
	return &master_pb.VolumeInformationMessage{
		Id:                uint32(vi.Id),
		Size:              uint64(vi.Size),
		Collection:        vi.Collection,
		FileCount:         uint64(vi.FileCount),
		DeleteCount:       uint64(vi.DeleteCount),
		DeletedByteCount:  vi.DeletedByteCount,
		ReadOnly:          vi.ReadOnly,
		ReplicaPlacement:  uint32(vi.ReplicaPlacement.Byte()),
		Version:           uint32(vi.Version),
		Ttl:               vi.Ttl.ToUint32(),
		CompactRevision:   vi.CompactRevision,
		ModifiedAtSecond:  vi.ModifiedAtSecond,
		RemoteStorageName: vi.RemoteStorageName,
		RemoteStorageKey:  vi.RemoteStorageKey,
		DiskType:          vi.DiskType,
		DiskId:            vi.DiskId,
	}
}

/*VolumesInfo sorting*/

type volumeInfos []*VolumeInfo

func (vis volumeInfos) Len() int {
	return len(vis)
}

func (vis volumeInfos) Less(i, j int) bool {
	return vis[i].Id < vis[j].Id
}

func (vis volumeInfos) Swap(i, j int) {
	vis[i], vis[j] = vis[j], vis[i]
}

func sortVolumeInfos(vis volumeInfos) {
	sort.Sort(vis)
}
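Usage illustration (not part of the file above): a minimal sketch of how a VolumeInfo is typically built from a master heartbeat message, converted back to protobuf, and sorted. It assumes the snippet compiles inside the same storage package (volumeInfos and sortVolumeInfos are unexported) and reuses the file's imports; all field values below are made up.

// Illustrative only; assumed to live in package storage alongside the code above.
func exampleVolumeInfoRoundTrip() {
	// Fabricated heartbeat message, as a volume server would report to the master.
	msg := &master_pb.VolumeInformationMessage{
		Id:               42,
		Size:             1 << 30,
		Collection:       "pictures",
		FileCount:        1000,
		DeleteCount:      10,
		DeletedByteCount: 4096,
		ReplicaPlacement: 1, // "001": one extra copy on the same rack
		Version:          3,
		DiskType:         "hdd",
		DiskId:           1,
	}

	vi, err := NewVolumeInfo(msg)
	if err != nil {
		fmt.Println("invalid volume info:", err)
		return
	}
	fmt.Println(vi.String())   // human-readable summary
	fmt.Println(vi.IsRemote()) // false: RemoteStorageName is empty

	// Round-trip back to the protobuf heartbeat form.
	_ = vi.ToVolumeInformationMessage()

	// Sort a slice of volumes by volume id.
	vis := volumeInfos{&vi}
	sortVolumeInfos(vis)
}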