Mirror of https://github.com/chrislusf/seaweedfs, synced 2025-08-17 09:32:48 +02:00
* initial design
* added simulation as tests
* reorganized the codebase to move the simulation framework and tests into their own dedicated package
* integration test; ec worker task
* remove "enhanced" reference
* start master, volume servers, filer

  Current status:
  ✅ Master: healthy and running (port 9333)
  ✅ Filer: healthy and running (port 8888)
  ✅ Volume servers: all 6 servers running (ports 8080-8085)
  🔄 Admin/workers: will start when dependencies are ready

* generate write load
* tasks are assigned
* admin starts with grpc port; worker has its own working directory
* Update .gitignore
* working worker and admin; task detection is not working yet
* compiles; detection uses volumeSizeLimitMB from master
* compiles
* worker retries connecting to admin
* build and restart
* rendering pending tasks
* skip task ID column
* sticky worker id
* test canScheduleTaskNow
* worker reconnects to admin
* clean up logs
* worker registers itself first
* worker can run ec work and report status, but: 1. one volume should not be repeatedly worked on; 2. ec shards need to be distributed and source data should be deleted
* move ec task logic
* listing ec shards
* local copy, ec; still needs to distribute
* ec is mostly working now
* distribution of ec shards needs improvement
* need configuration to enable ec
* show ec volumes
* interval field UI component
* rename
* integration test with vacuuming
* garbage percentage threshold
* fix warning
* display ec shard sizes
* fix ec volumes list
* Update ui.go
* show default values
* ensure correct default value
* MaintenanceConfig uses ConfigField
* use schema-defined defaults
* config
* reduce duplication
* refactor to use BaseUIProvider
* each task registers its schema
* checkECEncodingCandidate uses ecDetector
* use vacuumDetector
* use volumeSizeLimitMB
* remove remove
* remove unused
* refactor
* use new framework
* remove v2 reference
* refactor
* left menu can scroll now
* fix: the maintenance manager was not being initialized when no data directory was configured for persistent storage
* saving config
* Update task_config_schema_templ.go
* enable/disable tasks
* protobuf-encoded task configurations
* fix system settings
* use ui component
* remove logs
* interface{} reduction
* reduce interface{}
* reduce interface{}
* avoid from/to map
* reduce interface{}
* refactor
* keep it DRY
* added logging
* debug messages
* debug level
* debug
* show the log caller line
* use configured task policy
* log level
* handle admin heartbeat response
* Update worker.go
* fix EC rack and dc count
* report task status to admin server
* fix task logging, simplify interface checking, use erasure_coding constants
* factor in empty volume server during task planning
* volume.list adds disk id
* track disk id also
* fix locking for scheduled and manual scanning
* add active topology
* simplify task detector
* ec task completed, but shards are not showing up
* implement ec in ec_typed.go
* adjust log level
* dedup
* implement ec copying of shards and only ecx files
* use disk id when distributing ec shards

  🎯 Planning: ActiveTopology creates a DestinationPlan with a specific TargetDisk
  📦 Task creation: maintenance_integration.go creates an ECDestination with DiskId
  🚀 Task execution: the EC task passes DiskId in VolumeEcShardsCopyRequest
  💾 Volume server: receives disk_id and stores shards on the specific disk (vs.store.Locations[req.DiskId])
  📂 File system: EC shards and metadata land in the exact disk directory planned

* delete original volume from all locations
* clean up existing shard locations
* local encoding and distributing
* Update docker/admin_integration/EC-TESTING-README.md

  Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* check volume id range
* simplify
* fix tests
* fix types
* clean up logs and tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
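
A minimal sketch of the disk-id flow described above, assuming hypothetical struct shapes; the real DestinationPlan, ECDestination, and VolumeEcShardsCopyRequest definitions live in the SeaweedFS topology and volume server packages:

package main

import "fmt"

// Hypothetical shapes for illustration only.
type DestinationPlan struct {
	TargetNode string
	TargetDisk uint32 // chosen by ActiveTopology during planning
}

type ECDestination struct {
	Node   string
	DiskId uint32 // carried over from DestinationPlan.TargetDisk
}

type VolumeEcShardsCopyRequest struct {
	VolumeId uint32
	DiskId   uint32 // volume server stores shards via store.Locations[DiskId]
}

func main() {
	plan := DestinationPlan{TargetNode: "volume1:8080", TargetDisk: 2}
	dest := ECDestination{Node: plan.TargetNode, DiskId: plan.TargetDisk}
	req := VolumeEcShardsCopyRequest{VolumeId: 123, DiskId: dest.DiskId}
	fmt.Printf("EC shards for volume %d -> disk %d on %s\n", req.VolumeId, req.DiskId, dest.Node)
}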
232 lines
6.2 KiB
Go
package app

import (
	// "reflect" is used only by the illustrative reference sketch below;
	// the original tests need only "testing".
	"reflect"
	"testing"
)
// Test structs that mirror the actual configuration structure
type TestBaseConfigForTemplate struct {
	Enabled             bool `json:"enabled"`
	ScanIntervalSeconds int  `json:"scan_interval_seconds"`
	MaxConcurrent       int  `json:"max_concurrent"`
}

type TestTaskConfigForTemplate struct {
	TestBaseConfigForTemplate
	TaskSpecificField    float64 `json:"task_specific_field"`
	AnotherSpecificField string  `json:"another_specific_field"`
}
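
// getTaskFieldValue is defined elsewhere in this package. As a reading aid,
// referenceTaskFieldValue below is a minimal sketch of the behavior these
// tests assume: a reflection-based lookup that matches struct fields by JSON
// tag name, unwraps pointers, recurses through embedded structs, and returns
// nil for nil, non-struct, or unknown inputs. The name and implementation are
// illustrative assumptions, not the code under test.
func referenceTaskFieldValue(config interface{}, fieldName string) interface{} {
	v := reflect.ValueOf(config)
	// Unwrap pointers; a nil pointer has no fields to read.
	for v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return nil
		}
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		field := t.Field(i)
		// Promoted fields: search embedded structs recursively.
		if field.Anonymous {
			if result := referenceTaskFieldValue(v.Field(i).Interface(), fieldName); result != nil {
				return result
			}
			continue
		}
		// Match on the JSON tag name, dropping options such as ",omitempty".
		tag := field.Tag.Get("json")
		for j := 0; j < len(tag); j++ {
			if tag[j] == ',' {
				tag = tag[:j]
				break
			}
		}
		if tag == fieldName {
			return v.Field(i).Interface()
		}
	}
	return nil
}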

func TestGetTaskFieldValue_EmbeddedStructFields(t *testing.T) {
	config := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 2400,
			MaxConcurrent:       5,
		},
		TaskSpecificField:    0.18,
		AnotherSpecificField: "test_value",
	}

	// Test embedded struct fields
	tests := []struct {
		fieldName     string
		expectedValue interface{}
		description   string
	}{
		{"enabled", true, "BaseConfig boolean field"},
		{"scan_interval_seconds", 2400, "BaseConfig integer field"},
		{"max_concurrent", 5, "BaseConfig integer field"},
		{"task_specific_field", 0.18, "Task-specific float field"},
		{"another_specific_field", "test_value", "Task-specific string field"},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			result := getTaskFieldValue(config, test.fieldName)

			if result != test.expectedValue {
				t.Errorf("Field %s: expected %v (%T), got %v (%T)",
					test.fieldName, test.expectedValue, test.expectedValue, result, result)
			}
		})
	}
}

func TestGetTaskFieldValue_NonExistentField(t *testing.T) {
	config := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
	}

	result := getTaskFieldValue(config, "non_existent_field")

	if result != nil {
		t.Errorf("Expected nil for non-existent field, got %v", result)
	}
}
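
// Note: the nil config below is a typed nil, (*TestTaskConfigForTemplate)(nil),
// so the interface{} passed to getTaskFieldValue is itself non-nil; the lookup
// must unwrap the pointer and detect the nil value instead of dereferencing it.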
func TestGetTaskFieldValue_NilConfig(t *testing.T) {
	var config *TestTaskConfigForTemplate = nil

	result := getTaskFieldValue(config, "enabled")

	if result != nil {
		t.Errorf("Expected nil for nil config, got %v", result)
	}
}

func TestGetTaskFieldValue_EmptyStruct(t *testing.T) {
	config := &TestTaskConfigForTemplate{}

	// Test that we can extract zero values
	tests := []struct {
		fieldName     string
		expectedValue interface{}
		description   string
	}{
		{"enabled", false, "Zero value boolean"},
		{"scan_interval_seconds", 0, "Zero value integer"},
		{"max_concurrent", 0, "Zero value integer"},
		{"task_specific_field", 0.0, "Zero value float"},
		{"another_specific_field", "", "Zero value string"},
	}

	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			result := getTaskFieldValue(config, test.fieldName)

			if result != test.expectedValue {
				t.Errorf("Field %s: expected %v (%T), got %v (%T)",
					test.fieldName, test.expectedValue, test.expectedValue, result, result)
			}
		})
	}
}

func TestGetTaskFieldValue_NonStructConfig(t *testing.T) {
	var config interface{} = "not a struct"

	result := getTaskFieldValue(config, "enabled")

	if result != nil {
		t.Errorf("Expected nil for non-struct config, got %v", result)
	}
}

func TestGetTaskFieldValue_PointerToStruct(t *testing.T) {
	config := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             false,
			ScanIntervalSeconds: 900,
			MaxConcurrent:       2,
		},
		TaskSpecificField: 0.35,
	}

	// Test that pointers are handled correctly
	enabledResult := getTaskFieldValue(config, "enabled")
	if enabledResult != false {
		t.Errorf("Expected false for enabled field, got %v", enabledResult)
	}

	intervalResult := getTaskFieldValue(config, "scan_interval_seconds")
	if intervalResult != 900 {
		t.Errorf("Expected 900 for scan_interval_seconds field, got %v", intervalResult)
	}
}
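
// Note: a tag such as `json:"optional_field,omitempty"` still identifies the
// field as "optional_field"; the lookup is expected to strip tag options
// before matching.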
func TestGetTaskFieldValue_FieldsWithJSONOmitempty(t *testing.T) {
	// Test struct with omitempty tags
	type TestConfigWithOmitempty struct {
		TestBaseConfigForTemplate
		OptionalField string `json:"optional_field,omitempty"`
	}

	config := &TestConfigWithOmitempty{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 1200,
			MaxConcurrent:       4,
		},
		OptionalField: "optional_value",
	}

	// Test that fields with omitempty are still found
	result := getTaskFieldValue(config, "optional_field")
	if result != "optional_value" {
		t.Errorf("Expected 'optional_value' for optional_field, got %v", result)
	}

	// Test embedded fields still work
	enabledResult := getTaskFieldValue(config, "enabled")
	if enabledResult != true {
		t.Errorf("Expected true for enabled field, got %v", enabledResult)
	}
}

func TestGetTaskFieldValue_DeepEmbedding(t *testing.T) {
	// Test with multiple levels of embedding
	type DeepBaseConfig struct {
		DeepField string `json:"deep_field"`
	}

	type MiddleConfig struct {
		DeepBaseConfig
		MiddleField int `json:"middle_field"`
	}

	type TopConfig struct {
		MiddleConfig
		TopField bool `json:"top_field"`
	}

	config := &TopConfig{
		MiddleConfig: MiddleConfig{
			DeepBaseConfig: DeepBaseConfig{
				DeepField: "deep_value",
			},
			MiddleField: 123,
		},
		TopField: true,
	}

	// Test that deeply embedded fields are found
	deepResult := getTaskFieldValue(config, "deep_field")
	if deepResult != "deep_value" {
		t.Errorf("Expected 'deep_value' for deep_field, got %v", deepResult)
	}

	middleResult := getTaskFieldValue(config, "middle_field")
	if middleResult != 123 {
		t.Errorf("Expected 123 for middle_field, got %v", middleResult)
	}

	topResult := getTaskFieldValue(config, "top_field")
	if topResult != true {
		t.Errorf("Expected true for top_field, got %v", topResult)
	}
}

// Benchmark to ensure performance is reasonable
func BenchmarkGetTaskFieldValue(b *testing.B) {
	config := &TestTaskConfigForTemplate{
		TestBaseConfigForTemplate: TestBaseConfigForTemplate{
			Enabled:             true,
			ScanIntervalSeconds: 1800,
			MaxConcurrent:       3,
		},
		TaskSpecificField:    0.25,
		AnotherSpecificField: "benchmark_test",
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Test both embedded and regular fields
		_ = getTaskFieldValue(config, "enabled")
		_ = getTaskFieldValue(config, "task_specific_field")
	}
}