Mirror of https://github.com/chrislusf/seaweedfs, synced 2025-08-16 09:02:47 +02:00.
* initial design
* added simulation as tests
* reorganized the codebase to move the simulation framework and tests into their own dedicated package
* integration test: EC worker task
* remove "enhanced" reference
* start master, volume servers, filer
  Current status:
  ✅ Master: healthy and running (port 9333)
  ✅ Filer: healthy and running (port 8888)
  ✅ Volume servers: all 6 running (ports 8080-8085)
  🔄 Admin/workers: will start when dependencies are ready
* generate write load
* tasks are assigned
* admin starts with a gRPC port; each worker has its own working directory
* Update .gitignore
* working worker and admin; task detection is not working yet
* compiles; detection uses volumeSizeLimitMB from master
* compiles
* worker retries connecting to admin
* build and restart
* rendering pending tasks
* skip task ID column
* sticky worker id
* test canScheduleTaskNow
* worker reconnects to admin
* clean up logs
* worker registers itself first
* worker can run EC work and report status, but:
  1. one volume should not be repeatedly worked on
  2. EC shards need to be distributed and the source data should be deleted
* move EC task logic
* listing EC shards
* local copy, EC; still needs to be distributed
* EC is mostly working now
* distribution of EC shards needs improvement
* need configuration to enable EC
* show EC volumes
* interval field UI component
* rename
* integration test with vacuuming
* garbage percentage threshold
* fix warning
* display EC shard sizes
* fix EC volumes list
* Update ui.go
* show default values
* ensure correct default value
* MaintenanceConfig uses ConfigField
* use schema-defined defaults
* config
* reduce duplication
* refactor to use BaseUIProvider
* each task registers its own schema
* checkECEncodingCandidate uses ecDetector
* use vacuumDetector
* use volumeSizeLimitMB
* remove unused code
* refactor
* use new framework
* remove v2 reference
* refactor
* left menu can scroll now
* fix: the maintenance manager was not being initialized when no data directory was configured for persistent storage
* saving config
* Update task_config_schema_templ.go
* enable/disable tasks
* protobuf-encoded task configurations
* fix system settings
* use UI component
* remove logs
* interface{} reduction
* reduce interface{}
* reduce interface{}
* avoid converting from/to map
* reduce interface{}
* refactor
* keep it DRY
* added logging
* debug messages
* debug level
* debug
* show the log caller line
* use the configured task policy
* log level
* handle admin heartbeat response
* Update worker.go
* fix EC rack and dc count
* report task status to the admin server
* fix task logging, simplify interface checking, use erasure_coding constants
* factor in empty volume servers during task planning
* volume.list adds disk id
* track disk id as well
* fix locking for scheduled and manual scanning
* add active topology
* simplify task detector
* EC task completed, but shards are not showing up
* implement EC in ec_typed.go
* adjust log level
* dedup
* implementing EC copying of shards and only the ecx files
* use disk id when distributing EC shards (see the Go sketch after this list):
  🎯 Planning: ActiveTopology creates a DestinationPlan with a specific TargetDisk
  📦 Task creation: maintenance_integration.go creates an ECDestination with a DiskId
  🚀 Task execution: the EC task passes DiskId in VolumeEcShardsCopyRequest
  💾 Volume server: receives disk_id and stores shards on the specific disk (vs.store.Locations[req.DiskId])
  📂 File system: EC shards and metadata land in the exact disk directory planned
* delete the original volume from all locations
* clean up existing shard locations
* local encoding and distributing
* Update docker/admin_integration/EC-TESTING-README.md (co-authored with gemini-code-assist[bot])
* check volume id range
* simplify
* fix tests
* fix types
* clean up logs and tests

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
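The disk-targeted shard placement described in the last few bullets can be pictured with a minimal Go sketch. Only the VolumeEcShardsCopyRequest name, its DiskId/disk_id field, and the vs.store.Locations[req.DiskId] lookup come from the notes above; the surrounding types and the CopyEcShards helper are hypothetical, simplified stand-ins rather than the actual SeaweedFS API.

```go
package main

import "fmt"

// Hypothetical, simplified stand-ins for SeaweedFS's types: only the
// request name, its DiskId field, and the Locations[DiskId] lookup are
// taken from the commit notes above; everything else is illustrative.
type VolumeEcShardsCopyRequest struct {
	VolumeId uint32
	ShardIds []uint32
	DiskId   uint32 // chosen by ActiveTopology when the task was planned
}

type DiskLocation struct {
	Directory string // root directory of one configured disk
}

type Store struct {
	Locations []*DiskLocation // one entry per disk on this volume server
}

// CopyEcShards places the requested shards on the exact disk the planner
// selected, instead of letting the volume server pick a disk on its own.
func (s *Store) CopyEcShards(req *VolumeEcShardsCopyRequest) error {
	if int(req.DiskId) >= len(s.Locations) {
		return fmt.Errorf("disk id %d out of range (%d disks)", req.DiskId, len(s.Locations))
	}
	loc := s.Locations[req.DiskId] // vs.store.Locations[req.DiskId]
	for _, shardId := range req.ShardIds {
		// The real task copies shard data from the encoding server; this
		// sketch only shows where each shard would land.
		fmt.Printf("volume %d shard %d -> %s\n", req.VolumeId, shardId, loc.Directory)
	}
	return nil
}

func main() {
	store := &Store{Locations: []*DiskLocation{
		{Directory: "/data/disk0"},
		{Directory: "/data/disk1"},
	}}
	req := &VolumeEcShardsCopyRequest{VolumeId: 42, ShardIds: []uint32{0, 1, 2}, DiskId: 1}
	if err := store.CopyEcShards(req); err != nil {
		fmt.Println(err)
	}
}
```

The point of the indirection is that placement decisions stay with the planner: the volume server only honors the disk index it is handed, so planned and actual shard layout cannot drift apart.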
Makefile
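# Docker image and local-cluster targets for SeaweedFS. The default target
# chains all -> gen -> dev -> build -> binary, so a bare `make` compiles
# weed, builds the chrislusf/seaweedfs:local image, and starts the local
# dev cluster with docker compose.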
all: gen

.PHONY : gen

gen: dev

cgo ?= 0
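# binary cross-compiles weed for linux (CGO disabled unless cgo=1) and moves
# it, together with the MQ example clients, into this docker/ directory,
# where the local Dockerfiles pick them up.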
binary:
	export SWCOMMIT=$(shell git rev-parse --short HEAD)
	export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
	cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" -o weed_binary && mv weed_binary ../docker/weed
	cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
	cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/

binary_race: options = -race
binary_race: cgo = 1
binary_race: binary
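# Image builds: each target below bakes a tagged chrislusf/* image from the
# matching Dockerfile in this directory.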
build: binary
	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .

build_e2e: binary_race
	docker build --no-cache -t chrislusf/seaweedfs:e2e -f Dockerfile.e2e .

go_build: # make go_build tags=elastic,ydb,gocdk,hdfs,5BytesOffset,tarantool
	docker build --build-arg TAGS=$(tags) --no-cache -t chrislusf/seaweedfs:go_build -f Dockerfile.go_build .

go_build_large_disk:
	docker build --build-arg TAGS=large_disk --no-cache -t chrislusf/seaweedfs:large_disk -f Dockerfile.go_build .

build_rocksdb_dev_env:
	docker build --no-cache -t chrislusf/rocksdb_dev_env -f Dockerfile.rocksdb_dev_env .

build_rocksdb_local: build_rocksdb_dev_env
	cd .. ; docker build --no-cache -t chrislusf/seaweedfs:rocksdb_local -f docker/Dockerfile.rocksdb_large_local .

build_rocksdb:
	docker build --no-cache -t chrislusf/seaweedfs:rocksdb -f Dockerfile.rocksdb_large .

build_tarantool_dev_env:
	docker build --no-cache -t chrislusf/tarantool_dev_env -f Dockerfile.tarantool.dev_env .

s3tests_build:
	docker build --no-cache -t chrislusf/ceph-s3-tests:local -f Dockerfile.s3tests .
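# Local clusters: most of the targets below bring up a docker compose
# project named "seaweedfs" from a file under compose/. run_image instead
# opens an interactive shell in the freshly built local image (FUSE device
# and SYS_ADMIN capability are passed through for testing weed mount).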
dev: build
	docker compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_race: binary_race
	docker compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_tls: build certstrap
	ENV_FILE="tls.env" docker compose -f compose/local-dev-compose.yml -p seaweedfs up

dev_mount: build
	docker compose -f compose/local-mount-compose.yml -p seaweedfs up

run_image: build
	docker run --rm -ti --device /dev/fuse --cap-add SYS_ADMIN --entrypoint /bin/sh chrislusf/seaweedfs:local

profile_mount: build
	docker compose -f compose/local-mount-profile-compose.yml -p seaweedfs up

k8s: build
	docker compose -f compose/local-k8s-compose.yml -p seaweedfs up

dev_registry: build
	docker compose -f compose/local-registry-compose.yml -p seaweedfs up
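# dev_replicate rebuilds the local image with the gocdk build tag before
# starting the replication compose file; presumably the gocdk-tagged build
# compiles in the Go CDK backends that the replication setup relies on.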
dev_replicate:
	docker build --build-arg TAGS=gocdk --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.go_build .
	docker compose -f compose/local-replicate-compose.yml -p seaweedfs up

dev_auditlog: build
	docker compose -f compose/local-auditlog-compose.yml -p seaweedfs up

dev_nextcloud: build
	docker compose -f compose/local-nextcloud-compose.yml -p seaweedfs up

cluster: build
	docker compose -f compose/local-cluster-compose.yml -p seaweedfs up

2clusters: build
	docker compose -f compose/local-clusters-compose.yml -p seaweedfs up

2mount: build
	docker compose -f compose/local-sync-mount-compose.yml -p seaweedfs up

filer_backup: build
	docker compose -f compose/local-filer-backup-compose.yml -p seaweedfs up

hashicorp_raft: build
	docker compose -f compose/local-hashicorp-raft-compose.yml -p seaweedfs up
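# s3tests needs both images: the local seaweedfs image from build and the
# ceph-s3-tests image from s3tests_build.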
s3tests: build s3tests_build
	docker compose -f compose/local-s3tests-compose.yml -p seaweedfs up

brokers: build
	docker compose -f compose/local-brokers-compose.yml -p seaweedfs up

agent: build
	docker compose -f compose/local-mq-test.yml -p seaweedfs up
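# filer_etcd deploys through docker swarm (docker stack deploy) rather than
# compose, so the node must already be a swarm manager (docker swarm init).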
filer_etcd: build
	docker stack deploy -c compose/swarm-etcd.yml fs
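# Store-specific filer tests: the target-level tags assignments propagate
# to the prerequisite build/binary targets, so the matching store driver is
# compiled into weed before the compose file starts.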
test_etcd: build
	docker compose -f compose/test-etcd-filer.yml -p seaweedfs up

test_ydb: tags = ydb
test_ydb: build
	docker compose -f compose/test-ydb-filer.yml -p seaweedfs up

test_tarantool: tags = tarantool
test_tarantool: build_tarantool_dev_env build
	docker compose -f compose/test-tarantool-filer.yml -p seaweedfs up
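# clean removes only the weed binary that the binary target copied here.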
clean:
	rm ./weed
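# certstrap installs the certstrap tool, then creates a dev CA and
# per-component certificates under compose/tls for the dev_tls target;
# `|| true` keeps reruns from failing once the files already exist.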
certstrap:
	go install -v github.com/square/certstrap@latest
	certstrap --depot-path compose/tls init --curve P-256 --passphrase "" --common-name "SeaweedFS CA" || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --domain localhost --common-name volume01.dev || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name master01.dev || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name filer01.dev || true
	certstrap --depot-path compose/tls request-cert --ou "SeaweedFS" --curve P-256 --passphrase "" --common-name client01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" volume01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" master01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" filer01.dev || true
	certstrap --depot-path compose/tls sign --CA "SeaweedFS CA" client01.dev || true