mirror of https://github.com/chrislusf/seaweedfs synced 2025-08-16 09:02:47 +02:00

Compare commits: 3.96...master (67 commits)

Author SHA1 Message Date
chrislu
6d265cc74b ensure correct next pointer 2025-08-15 10:35:33 -07:00
chrislu
62ed2366f3 skip file with invalid format 2025-08-15 10:35:33 -07:00
chrislu
8eb85415fb correct error 2025-08-15 10:35:33 -07:00
chrislu
9843a10f2b fix stop time 2025-08-15 10:35:33 -07:00
chrislu
d5ded63ce3 day-advance fix 2025-08-15 10:35:33 -07:00
chrislu
fdfa089754 fix ListAllMyBucketsResult xmlns
fix https://github.com/seaweedfs/seaweedfs/issues/6676
2025-08-14 20:38:03 -07:00
chrislu
80db6f4d79 reduce lock scope to improve log buffer performance 2025-08-14 20:38:03 -07:00
Lisandro Pin
18a22177b9
Fix volume server's status code returned for missing needles on EC-encoded shards (#7137) 2025-08-14 06:35:55 -07:00
dependabot[bot]
3729e9ba25
chore(deps): bump golang.org/x/image from 0.29.0 to 0.30.0 (#7129)
Bumps [golang.org/x/image](https://github.com/golang/image) from 0.29.0 to 0.30.0.
- [Commits](https://github.com/golang/image/compare/v0.29.0...v0.30.0)

---
updated-dependencies:
- dependency-name: golang.org/x/image
  dependency-version: 0.30.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 10:20:23 -07:00
dependabot[bot]
890b51eaef
chore(deps): bump golang.org/x/tools from 0.35.0 to 0.36.0 (#7128)
Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.35.0 to 0.36.0.
- [Release notes](https://github.com/golang/tools/releases)
- [Commits](https://github.com/golang/tools/compare/v0.35.0...v0.36.0)

---
updated-dependencies:
- dependency-name: golang.org/x/tools
  dependency-version: 0.36.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-12 10:20:09 -07:00
chrislu
6bf5a6871c fix presigned signature 2025-08-11 23:57:59 -07:00
dependabot[bot]
3864f89f92
chore(deps): bump golang.org/x/crypto from 0.40.0 to 0.41.0 (#7132)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.40.0 to 0.41.0.
- [Commits](https://github.com/golang/crypto/compare/v0.40.0...v0.41.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-version: 0.41.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 23:35:09 -07:00
dependabot[bot]
d1f18f08d3
chore(deps): bump google.golang.org/protobuf from 1.36.6 to 1.36.7 (#7131)
Bumps google.golang.org/protobuf from 1.36.6 to 1.36.7.

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-version: 1.36.7
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 23:34:58 -07:00
chrislu
7889e78f4d use one http client 2025-08-11 16:45:13 -07:00
Chris Lu
af3300e063
filer: server side copying (#7121)
* copy

* address comments

* remove unused functions, reuse http clients

* address hardlink, checking existing directory

* destination is directory

* check for the key's existence in the map first before accessing its members

* address comments

* deep copy remote entry

* address comments

* copying chunks in parallel

* handle manifest chunks

* address comments

* errgroup

* there could be large chunks

* address comments

* address comments
2025-08-11 16:40:46 -07:00
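A minimal sketch of the "copying chunks in parallel" with errgroup idea from #7121 above; `copyChunk` and the concurrency limit are hypothetical stand-ins, not the actual filer code, which also resolves manifest chunks and deep-copies the remote entry.

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// copyChunk stands in for copying one file chunk to the destination.
func copyChunk(ctx context.Context, chunkID int) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil // pretend the copy succeeded
	}
}

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.SetLimit(4) // bound parallelism; the PR notes chunks can be large
	for id := 0; id < 16; id++ {
		id := id // per-iteration copy; not needed on Go 1.22+, kept for clarity
		g.Go(func() error { return copyChunk(ctx, id) })
	}
	if err := g.Wait(); err != nil {
		fmt.Println("copy failed:", err)
	}
}
```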
dependabot[bot]
7e86045e22
chore(deps): bump golang.org/x/net from 0.42.0 to 0.43.0 (#7126)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.42.0 to 0.43.0.
- [Commits](https://github.com/golang/net/compare/v0.42.0...v0.43.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.43.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 15:21:46 -07:00
dependabot[bot]
529c68a798
chore(deps): bump google.golang.org/api from 0.244.0 to 0.246.0 (#7127)
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.244.0 to 0.246.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.244.0...v0.246.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-version: 0.246.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 15:06:25 -07:00
dependabot[bot]
e3b15073a0
chore(deps): bump actions/checkout from 4 to 5 (#7125)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5.
- [Release notes](https://github.com/actions/checkout/releases)
- [Commits](https://github.com/actions/checkout/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 15:06:14 -07:00
dependabot[bot]
5f62b11889
chore(deps): bump golang.org/x/sys from 0.34.0 to 0.35.0 (#7124)
Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.34.0 to 0.35.0.
- [Commits](https://github.com/golang/sys/compare/v0.34.0...v0.35.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-version: 0.35.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 15:06:01 -07:00
dependabot[bot]
524c1916b6
chore(deps): bump github.com/redis/go-redis/v9 from 9.11.0 to 9.12.0 (#7123)
Bumps [github.com/redis/go-redis/v9](https://github.com/redis/go-redis) from 9.11.0 to 9.12.0.
- [Release notes](https://github.com/redis/go-redis/releases)
- [Changelog](https://github.com/redis/go-redis/blob/master/RELEASE-NOTES.md)
- [Commits](https://github.com/redis/go-redis/compare/v9.11.0...v9.12.0)

---
updated-dependencies:
- dependency-name: github.com/redis/go-redis/v9
  dependency-version: 9.12.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 15:05:53 -07:00
dependabot[bot]
412fd6ee86
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3 from 3.113.4 to 3.113.5 (#7122)
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3

Bumps [github.com/ydb-platform/ydb-go-sdk/v3](https://github.com/ydb-platform/ydb-go-sdk) from 3.113.4 to 3.113.5.
- [Release notes](https://github.com/ydb-platform/ydb-go-sdk/releases)
- [Changelog](https://github.com/ydb-platform/ydb-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/ydb-platform/ydb-go-sdk/compare/v3.113.4...v3.113.5)

---
updated-dependencies:
- dependency-name: github.com/ydb-platform/ydb-go-sdk/v3
  dependency-version: 3.113.5
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-11 15:05:44 -07:00
Chris Lu
605b3333c1
fix sftp start with filer (#7120)
* fix sftp start with filer

* add bindIp
2025-08-11 10:56:40 -07:00
Chris Lu
9265e81fe9
S3 API: unsigned streaming (no cred) but chunks contain signatures (#7118)
* This handles the case where we have unsigned streaming (no cred) but chunks contain signatures

* Update chunked_reader_v4.go

* address comments
2025-08-11 10:31:01 -07:00
chrislu
a265a07922 fix ttl objects
fix https://github.com/seaweedfs/seaweedfs/discussions/7107#discussioncomment-14069018
2025-08-11 02:20:02 -07:00
Chris Lu
25bbf4c3d4
Admin UI: Fetch task logs (#7114)
* show task details

* loading tasks

* task UI works

* generic rendering

* rendering the export link

* removing placementConflicts from task parameters

* remove TaskSourceLocation

* remove "Server ID" column

* rendering balance task source

* sources and targets

* fix ec task generation

* move info

* render timeline

* simplified worker id

* simplify

* read task logs from worker

* isValidTaskID

* address comments

* Update weed/worker/tasks/balance/execution.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/worker/tasks/erasure_coding/ec_task.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/worker/tasks/task_log_handler.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix shard ids

* plan distributing shard id

* rendering planned shards in task details

* remove Conflicts

* worker logs correctly

* pass in dc and rack

* task logging

* Update weed/admin/maintenance/maintenance_queue.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* display log details

* logs have fields now

* sort field keys

* fix link

* fix collection filtering

* avoid hard coded ec shard counts

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-09 21:47:29 -07:00
Chris Lu
3ac2a2e22d
fix tikv listing due to expired entries (#7115)
* fix tikv listing due to expired entries

When there are many entries with empty fileName values (which can happen after TTL cleanup), the continue statements prevent the loop counter from incrementing, creating an infinite loop.

* address comments

* Update weed/filer/tikv/tikv_store.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* address comments

Update weed/filer/tikv/tikv_store.go

Co-Authored-By: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-08 12:38:55 -07:00
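A hedged sketch of the loop bug described in #7115 above; the names are illustrative, not the actual weed/filer/tikv/tikv_store.go code. The point is that skipping an entry with an empty fileName must still advance the scan, otherwise the listing never terminates once such entries dominate.

```go
package main

import "fmt"

// Entry mimics a filer entry; an empty Name plays the role of the empty
// fileName left behind after TTL cleanup that the PR describes.
type Entry struct{ Name string }

// listNames returns up to limit non-empty names. The fix in miniature:
// advance the index before deciding to skip, so a skipped entry still makes
// progress. The buggy version hit `continue` without incrementing anything,
// which loops forever when empty names dominate the range.
func listNames(entries []Entry, limit int) []string {
	var out []string
	i := 0
	for len(out) < limit && i < len(entries) {
		e := entries[i]
		i++ // progress happens even when we skip below
		if e.Name == "" {
			continue // expired entry: skip it, but i already moved on
		}
		out = append(out, e.Name)
	}
	return out
}

func main() {
	entries := []Entry{{"a"}, {""}, {""}, {"b"}, {"c"}}
	fmt.Println(listNames(entries, 2)) // [a b]
}
```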
Devin Lauderdale
fae416586b
Move helm templates into folders (#7113)
* refactor: move helm templates into respective service folders

* fix: update template path reference in filer-statefulset for s3-secret
2025-08-08 10:36:01 -07:00
Devin Lauderdale
92cebe12f0
chore: remove default replica count for all-in-one deployment (#7111) 2025-08-07 21:18:17 -07:00
Chris Lu
535985adb6
Shell: add verbose ec encoding mode (#7105)
* add verbose ec encoding mode

* address comments
2025-08-07 00:12:05 -07:00
Chris Lu
cde2d65c16
ec candidate selection needs to adjust same rack count compare (#7106)
ec needs to adjust same rack count compare
2025-08-07 00:09:51 -07:00
Chris Lu
b4d9618efc
volume server UI: fix ec volume ui (#7104)
* fix ec volume ui

* Update weed/storage/erasure_coding/ec_volume.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-07 00:07:03 -07:00
chrislu
dd4880d55a fix for baidu cloud storage 2025-08-06 20:53:05 -07:00
Chris Lu
4af182f880
Context cancellation during reading range reading large files (#7093)
* context cancellation during reading range reading large files

* address comments

* cancellation for fuse read

* fix cancellation

* pass in context for each function to avoid racing condition

* Update reader_at_test.go

* remove dead code

* Update weed/filer/reader_at.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/filer/filechunk_group.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/filer/filechunk_group.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* address comments

* Update weed/mount/weedfs_file_read.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/mount/weedfs_file_lseek.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/mount/weedfs_file_read.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/filer/reader_at.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/mount/weedfs_file_lseek.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* test cancellation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-06 10:09:26 -07:00
Chris Lu
e446234e9c
remove spoof-able request header (#7103)
* remove spoof-able request header

https://github.com/seaweedfs/seaweedfs/issues/7094#issuecomment-3158320497

* Update weed/security/guard.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-06 10:08:30 -07:00
Chris Lu
0703308270
remote address parsing should handle special cases (#7101)
* remote address parsing should handle special cases

* handling ipv6

* simplify

* Update weed/security/guard.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/security/guard.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* x-real-ip

* Update guard.go

* fixes

 Hostname Whitelisting: Fully restored - supports localhost, example.com, etc.
 IP Whitelisting: Still works - supports exact IPs and CIDR ranges
 Header Support: Consistent handling of X-Forwarded-For, X-Real-IP

* simplify

* Update weed/security/guard.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update weed/security/guard.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update guard.go

* adjust function signature

* Update weed/security/guard.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* indention

* skip empty host

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-06 01:03:00 -07:00
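A rough sketch of the whitelist behavior summarized in the #7101 notes above (exact IPs, CIDR ranges, and hostnames such as localhost); this is illustrative only and not the actual weed/security/guard.go implementation.

```go
package main

import (
	"fmt"
	"net"
)

// allowed reports whether remote matches any whitelist entry: a CIDR range,
// an exact IP, or (as a simplification) a literal hostname string.
func allowed(remote string, whiteList []string) bool {
	ip := net.ParseIP(remote)
	for _, entry := range whiteList {
		if _, cidr, err := net.ParseCIDR(entry); err == nil {
			if ip != nil && cidr.Contains(ip) {
				return true
			}
			continue
		}
		if other := net.ParseIP(entry); other != nil {
			if ip != nil && other.Equal(ip) {
				return true
			}
			continue
		}
		if entry == remote { // hostname entry like "localhost"
			return true
		}
	}
	return false
}

func main() {
	wl := []string{"10.0.0.0/8", "192.168.1.5", "localhost"}
	fmt.Println(allowed("10.1.2.3", wl))    // true (CIDR range)
	fmt.Println(allowed("192.168.1.5", wl)) // true (exact IP)
	fmt.Println(allowed("localhost", wl))   // true (hostname)
	fmt.Println(allowed("8.8.8.8", wl))     // false
}
```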
Chris Lu
c6d9756933
fix signature hashing for iam (#7100)
* fix signature hashing for iam

* add tests

* address comments

* Update weed/s3api/auto_signature_v4_test.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* indention

* fix test

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-05 22:54:54 -07:00
dependabot[bot]
b01b5e0f34
chore(deps): bump github.com/aws/aws-sdk-go-v2/config from 1.29.18 to 1.30.2 (#7099)
chore(deps): bump github.com/aws/aws-sdk-go-v2/config

Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.29.18 to 1.30.2.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.18...v1.30.2)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/config
  dependency-version: 1.30.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 22:06:58 -07:00
dependabot[bot]
315e089d41
chore(deps): bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.71 to 1.18.2 (#7084)
chore(deps): bump github.com/aws/aws-sdk-go-v2/credentials

Bumps [github.com/aws/aws-sdk-go-v2/credentials](https://github.com/aws/aws-sdk-go-v2) from 1.17.71 to 1.18.2.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.18.2/CHANGELOG.md)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.17.71...config/v1.18.2)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/credentials
  dependency-version: 1.18.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 21:22:07 -07:00
dependabot[bot]
ec56e7e8d9
chore(deps): bump github.com/getsentry/sentry-go from 0.34.1 to 0.35.0 (#7098)
Bumps [github.com/getsentry/sentry-go](https://github.com/getsentry/sentry-go) from 0.34.1 to 0.35.0.
- [Release notes](https://github.com/getsentry/sentry-go/releases)
- [Changelog](https://github.com/getsentry/sentry-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/sentry-go/compare/v0.34.1...v0.35.0)

---
updated-dependencies:
- dependency-name: github.com/getsentry/sentry-go
  dependency-version: 0.35.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 14:31:32 -07:00
dependabot[bot]
bc19d144b5
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.84.1 to 1.85.1 (#7083)
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3

Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.84.1 to 1.85.1.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.84.1...service/s3/v1.85.1)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-version: 1.85.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 14:31:19 -07:00
dependabot[bot]
69fb524e36
chore(deps): bump google.golang.org/api from 0.243.0 to 0.244.0 (#7096)
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.243.0 to 0.244.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.243.0...v0.244.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-version: 0.244.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-05 14:31:08 -07:00
chrislu
2d11e9bc1a Pin helm-gh-pages action to stable version v1.7.0
- Use stable release instead of @master to avoid potential regressions
- This should help resolve the 'gzip: invalid magic' error
2025-08-05 08:42:05 -07:00
chrislu
4f38b7c3b9 Revert "Fix helm-gh-pages action configuration"
This reverts commit 26ef76b7b9.
2025-08-05 08:41:18 -07:00
chrislu
26ef76b7b9 Fix helm-gh-pages action configuration
- Add missing commit_username and commit_email parameters
- This should resolve the 'gzip: invalid magic' error in the Helm GitHub Pages action
2025-08-05 08:39:53 -07:00
Chris Lu
a834327755
context cancellation during reading range reading large files (#7092)
* context cancellation during reading range reading large files

* address comments
2025-08-04 23:22:11 -07:00
dependabot[bot]
375dfe18a4
chore(deps): bump modernc.org/sqlite from 1.38.1 to 1.38.2 (#7091)
Bumps [modernc.org/sqlite](https://gitlab.com/cznic/sqlite) from 1.38.1 to 1.38.2.
- [Commits](https://gitlab.com/cznic/sqlite/compare/v1.38.1...v1.38.2)

---
updated-dependencies:
- dependency-name: modernc.org/sqlite
  dependency-version: 1.38.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 21:30:55 -07:00
dependabot[bot]
ca9da29504
chore(deps): bump github.com/prometheus/client_golang from 1.22.0 to 1.23.0 (#7090)
chore(deps): bump github.com/prometheus/client_golang

Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.22.0 to 1.23.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.22.0...v1.23.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-version: 1.23.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 21:30:48 -07:00
dependabot[bot]
b376eccd22
chore(deps): bump github.com/golang-jwt/jwt/v5 from 5.2.3 to 5.3.0 (#7089)
Bumps [github.com/golang-jwt/jwt/v5](https://github.com/golang-jwt/jwt) from 5.2.3 to 5.3.0.
- [Release notes](https://github.com/golang-jwt/jwt/releases)
- [Changelog](https://github.com/golang-jwt/jwt/blob/main/VERSION_HISTORY.md)
- [Commits](https://github.com/golang-jwt/jwt/compare/v5.2.3...v5.3.0)

---
updated-dependencies:
- dependency-name: github.com/golang-jwt/jwt/v5
  dependency-version: 5.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-04 21:30:39 -07:00
dependabot[bot]
b1ce55291f
chore(deps): bump docker/metadata-action from 5.7.0 to 5.8.0 (#7082) 2025-08-04 19:15:32 -07:00
dependabot[bot]
deb771817f
chore(deps): bump docker/login-action from 3.4.0 to 3.5.0 (#7085) 2025-08-04 19:15:07 -07:00
dependabot[bot]
16a16d79ce
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3 from 3.113.2 to 3.113.4 (#7086) 2025-08-04 19:14:35 -07:00
dependabot[bot]
20694a84f0
chore(deps): bump github.com/aws/aws-sdk-go from 1.55.7 to 1.55.8 (#7087) 2025-08-04 19:14:24 -07:00
dependabot[bot]
fd568cd796
chore(deps): bump cloud.google.com/go/pubsub from 1.49.0 to 1.50.0 (#7088) 2025-08-04 19:14:03 -07:00
Chris Lu
72176601c1
S3: Fix iam payload hash (#7081)
* fix iam payload hash

* streaming hash

* Update weed/s3api/auto_signature_v4_test.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update weed/s3api/auto_signature_v4_test.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* address comments

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-04 09:10:01 -07:00
Chris Lu
365d03ff32
mount ec shards correctly (#7079) 2025-08-03 23:10:28 -07:00
Chris Lu
513ac58504
Filer: fix filer range read (#7078)
* fix filer range read

Only return true if we're reading the ENTIRE chunk from the beginning. This prevents bandwidth amplification when range requests happen to align with chunk boundaries but don't actually want the full chunk.

* Update weed/filer/filechunks.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-03 14:26:15 -07:00
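A hedged sketch of the rule quoted above; the names are illustrative and the real check lives in weed/filer/filechunks.go. A chunk download is only treated as "whole chunk" when the request starts at the chunk's beginning and covers all of it.

```go
package filerexample

// readsWholeChunk reports whether a read [readOffset, readOffset+readSize)
// covers the entire chunk [chunkOffset, chunkOffset+chunkSize) from its start.
// Merely touching a chunk boundary is not enough; returning true for a partial
// overlap is what caused the bandwidth amplification described above.
func readsWholeChunk(readOffset, readSize, chunkOffset, chunkSize int64) bool {
	return readOffset <= chunkOffset &&
		readOffset+readSize >= chunkOffset+chunkSize
}
```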
Chris Lu
4fb7bbb215
Filer Store: postgres backend support pgbouncer (#7077)
support pgbouncer
2025-08-03 11:56:04 -07:00
Chris Lu
d49b44f2a4
Postgres (CockroachDB) with full certificate verification (#7076)
* Postgres (CockroachDB) with full certificate verification

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* remove duplicated comments

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-03 09:43:33 -07:00
Chris Lu
8c23952326
separate context for filer store (#7075)
* separate context for filer store

* clone request id

* context.WithoutCancel
2025-08-03 09:20:17 -07:00
Chris Lu
0ecb466eda
Admin: refactoring active topology (#7073)
* refactoring

* add ec shard size

* address comments

* passing task id

There seems to be a disconnect between the pending tasks created in ActiveTopology and the TaskDetectionResult returned by this function. A taskID is generated locally and used to create pending tasks via AddPendingECShardTask, but this taskID is not stored in the TaskDetectionResult or passed along in any way.

This makes it impossible for the worker that eventually executes the task to know which pending task in ActiveTopology it corresponds to. Without the correct taskID, the worker cannot call AssignTask or CompleteTask on the master, breaking the entire task lifecycle and capacity management feature.

A potential solution is to add a TaskID field to TaskDetectionResult and worker_pb.TaskParams, ensuring the ID is propagated from detection to execution.

* 1 source multiple destinations

* task supports multi source and destination

* ec needs to clean up previous shards

* use erasure coding constants

* getPlanningCapacityUnsafe getEffectiveAvailableCapacityUnsafe  should return StorageSlotChange for calculation

* use CanAccommodate to calculate

* remove dead code

* address comments

* fix Mutex Copying in Protobuf Structs

* use constants

* fix estimatedSize

The calculation for estimatedSize only considers source.EstimatedSize and dest.StorageChange, but omits dest.EstimatedSize. The TaskDestination struct has an EstimatedSize field, which seems to be ignored here. This could lead to an incorrect estimation of the total size of data involved in tasks on a disk. The loop should probably also include estimatedSize += dest.EstimatedSize.

* at.assignTaskToDisk(task)

* refactoring

* Update weed/admin/topology/internal.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* fail fast

* fix compilation

* Update weed/worker/tasks/erasure_coding/detection.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* indexes for volume and shard locations

* dedup with ToVolumeSlots

* return an additional boolean to indicate success, or an error

* Update abstract_sql_store.go

* fix

* Update weed/worker/tasks/erasure_coding/detection.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update weed/admin/topology/task_management.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* faster findVolumeDisk

* Update weed/worker/tasks/erasure_coding/detection.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/admin/topology/storage_slot_test.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* refactor

* simplify

* remove unused GetDiskStorageImpact function

* refactor

* add comments

* Update weed/admin/topology/storage_impact.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update weed/admin/topology/storage_slot_test.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update storage_impact.go

* AddPendingTask

The unified AddPendingTask function now serves as the single entry point for all task creation, successfully consolidating the previously separate functions while maintaining full functionality and improving code organization.

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-03 01:35:38 -07:00
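A small sketch of the estimatedSize point raised in the review comments above; the types below are stand-ins for the ActiveTopology task structs, whose real field names may differ.

```go
package topologyexample

// Source and Destination are illustrative stand-ins for the pending-task types.
type Source struct{ EstimatedSize int64 }

type Destination struct {
	EstimatedSize int64
	StorageChange int64
}

// estimatedTaskSize sums everything a task moves through a disk. The review
// comment argues the destination's own EstimatedSize must be included, not
// just source.EstimatedSize and dest.StorageChange.
func estimatedTaskSize(sources []Source, dests []Destination) int64 {
	var total int64
	for _, s := range sources {
		total += s.EstimatedSize
	}
	for _, d := range dests {
		total += d.EstimatedSize // the previously omitted term
		total += d.StorageChange
	}
	return total
}
```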
Ibrahim Konsowa
315fcc70b2
fix: dead letter message log message (#7072) 2025-08-02 08:21:57 -07:00
Chris Lu
9d013ea9b8
Admin UI: include ec shard sizes into volume server info (#7071)
* show ec shards on dashboard, show max in its own column

* master collect shard size info

* master send shard size via VolumeList

* change to more efficient shard sizes slice

* include ec shard sizes into volume server info

* Eliminated Redundant gRPC Calls

* much more efficient

* Efficient Counting: bits.OnesCount32() uses CPU-optimized instructions to count set bits in O(1)

* avoid extra volume list call

* simplify

* preserve existing shard sizes

* avoid hard coded value

* Update weed/storage/erasure_coding/ec_volume_info.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update weed/admin/dash/volume_management.go

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update ec_volume_info.go

* address comments

* avoid duplicated functions

* Update weed/admin/dash/volume_management.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* simplify

* refactoring

* fix compilation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-02 02:16:49 -07:00
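A tiny example of the counting trick mentioned above: if the EC shards present on a server are kept as a uint32 bitmask (bit i set means shard i is present, an assumed layout for illustration), bits.OnesCount32 counts them in constant time.

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// Shards 0-9 present out of SeaweedFS's 14 EC shards (10 data + 4 parity).
	var shardBits uint32 = 0b00_0011_1111_1111
	fmt.Println(bits.OnesCount32(shardBits)) // 10
}
```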
Chris Lu
3d4e8409a5
Support X-Forwarded-Port (#7070)
* support for the X-Forwarded-Prefix header

* remove comments

* refactoring

* refactoring

* path.Clean

* support X-Forwarded-Port

* Update weed/s3api/auth_signature_v4.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* Update weed/s3api/auto_signature_v4_test.go

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>

* more tests

---------

Co-authored-by: gemini-code-assist[bot] <176961590+gemini-code-assist[bot]@users.noreply.github.com>
2025-08-01 15:45:34 -07:00
Chris Lu
fd447465c2
fix parsing s3 tag (#7069)
* fix parsing s3 tag

fix https://github.com/seaweedfs/seaweedfs/issues/7040#issuecomment-3145615630

* url.ParseQuery
2025-08-01 15:45:23 -07:00
Chris Lu
f1eb4dd427
S3: support for the X-Forwarded-Prefix header (#7068)
* support for the X-Forwarded-Prefix header

* remove comments

* refactoring

* refactoring

* path.Clean
2025-08-01 13:07:54 -07:00
Chris Lu
52d87f1d29
S3: fix list buckets handler (#7067)
* s3: fix list buckets handler

* ListBuckets permission checking
2025-08-01 12:13:11 -07:00
Chris Lu
0975968e71
admin: Refactor task destination planning (#7063)
* refactor planning into task detection

* refactoring worker tasks

* refactor

* compiles, but only balance task is registered

* compiles, but has nil exception

* avoid nil logger

* add back ec task

* setting ec log directory

* implement balance and vacuum tasks

* EC tasks will no longer fail with "file not found" errors

* Use ReceiveFile API to send locally generated shards

* distributing shard files and ecx,ecj,vif files

* generate .ecx files correctly

* do not mount all possible EC shards (0-13) on every destination

* use constants

* delete all replicas

* rename files

* pass in volume size to tasks
2025-08-01 11:18:32 -07:00
219 changed files with 16475 additions and 5209 deletions

View file

@@ -38,7 +38,7 @@ jobs:
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}
@@ -87,7 +87,7 @@ jobs:
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Set BUILD_TIME env
         run: echo BUILD_TIME=$(date -u +%Y%m%d-%H%M) >> ${GITHUB_ENV}

View file

@@ -28,7 +28,7 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
         with:

View file

@@ -28,7 +28,7 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
         with:

View file

@@ -28,7 +28,7 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
         with:

View file

@@ -28,7 +28,7 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
         with:

View file

@@ -28,7 +28,7 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
         with:

View file

@@ -28,7 +28,7 @@ jobs:
     # Steps represent a sequence of tasks that will be executed as part of the job
     steps:
       # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Go Release Binaries Normal Volume Size
         uses: wangyoucao577/go-release-action@481a2c1a0f1be199722e3e9b74d7199acafc30a8 # v1.22
         with:

View file

@@ -18,7 +18,7 @@ jobs:
     steps:
       - name: Checkout repository
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
       # Initializes the CodeQL tools for scanning.
       - name: Initialize CodeQL

View file

@@ -16,11 +16,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -42,14 +42,14 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       -
         name: Login to GHCR
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           registry: ghcr.io
           username: ${{ secrets.GHCR_USERNAME }}

View file

@@ -17,11 +17,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -43,14 +43,14 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}
       -
         name: Login to GHCR
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           registry: ghcr.io
           username: ${{ secrets.GHCR_USERNAME }}

View file

@@ -16,11 +16,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -41,7 +41,7 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}

View file

@@ -17,11 +17,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -42,7 +42,7 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}

View file

@@ -17,11 +17,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -42,7 +42,7 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}

View file

@@ -16,11 +16,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -41,7 +41,7 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}

View file

@@ -16,11 +16,11 @@ jobs:
     steps:
       -
         name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       -
         name: Docker meta
         id: docker_meta
-        uses: docker/metadata-action@902fa8ec7d6ecbf8d84d538b9b233a880e428804 # v3
+        uses: docker/metadata-action@c1e51972afc2121e065aed6d45c65596fe445f3f # v3
         with:
           images: |
             chrislusf/seaweedfs
@@ -41,7 +41,7 @@ jobs:
       -
         name: Login to Docker Hub
         if: github.event_name != 'pull_request'
-        uses: docker/login-action@74a5d142397b4f367a81961eba4e8cd7edddf772 # v1
+        uses: docker/login-action@184bdaa0721073962dff0199f1fb9940f07167d1 # v1
         with:
           username: ${{ secrets.DOCKER_USERNAME }}
           password: ${{ secrets.DOCKER_PASSWORD }}

View file

@@ -21,7 +21,7 @@ jobs:
   deploy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5

View file

@@ -9,6 +9,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: 'Checkout Repository'
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
       - name: 'Dependency Review'
         uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9

View file

@@ -30,7 +30,7 @@ jobs:
         id: go
       - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Install dependencies
         run: |

View file

@@ -33,7 +33,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go ${{ env.GO_VERSION }}
         uses: actions/setup-go@v5

View file

@@ -27,7 +27,7 @@ jobs:
         id: go
      - name: Check out code into the Go module directory
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v2
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v2
       - name: Get dependencies
         run: |

View file

@@ -12,9 +12,9 @@ jobs:
   release:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
       - name: Publish Helm charts
-        uses: stefanprodan/helm-gh-pages@master
+        uses: stefanprodan/helm-gh-pages@v1.7.0
         with:
           token: ${{ secrets.GITHUB_TOKEN }}
           charts_dir: k8s/charts

View file

@@ -16,7 +16,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633
+        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8
         with:
           fetch-depth: 0

View file

@@ -25,7 +25,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
@@ -89,7 +89,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
@@ -137,7 +137,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
@@ -188,7 +188,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
@@ -255,7 +255,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
@@ -319,7 +319,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5
@@ -370,7 +370,7 @@ jobs:
     steps:
       - name: Check out code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go
         uses: actions/setup-go@v5

View file

@@ -20,7 +20,7 @@ jobs:
     timeout-minutes: 15
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go 1.x
         uses: actions/setup-go@v5
@@ -313,7 +313,7 @@ jobs:
     timeout-minutes: 15
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go 1.x
         uses: actions/setup-go@v5
@@ -439,7 +439,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go 1.x
         uses: actions/setup-go@v5
@@ -562,7 +562,7 @@ jobs:
     timeout-minutes: 10
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go 1.x
         uses: actions/setup-go@v5
@@ -662,7 +662,7 @@ jobs:
     timeout-minutes: 15
     steps:
       - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Set up Go 1.x
         uses: actions/setup-go@v5

View file

@@ -20,7 +20,7 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 5
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
       - uses: actions/setup-go@v5
         with:

View file

@@ -116,7 +116,7 @@ services:
     ports:
       - "23646:23646"  # HTTP admin interface (default port)
       - "33646:33646"  # gRPC worker communication (23646 + 10000)
-    command: "admin -port=23646 -masters=master:9333 -dataDir=/data"
+    command: "-v=2 admin -port=23646 -masters=master:9333 -dataDir=/data"
     depends_on:
       - master
       - filer

go.mod (78 changed lines)
View file

@ -6,12 +6,12 @@ toolchain go1.24.1
require ( require (
cloud.google.com/go v0.121.4 // indirect cloud.google.com/go v0.121.4 // indirect
cloud.google.com/go/pubsub v1.49.0 cloud.google.com/go/pubsub v1.50.0
cloud.google.com/go/storage v1.56.0 cloud.google.com/go/storage v1.56.0
github.com/Azure/azure-pipeline-go v0.2.3 github.com/Azure/azure-pipeline-go v0.2.3
github.com/Azure/azure-storage-blob-go v0.15.0 github.com/Azure/azure-storage-blob-go v0.15.0
github.com/Shopify/sarama v1.38.1 github.com/Shopify/sarama v1.38.1
github.com/aws/aws-sdk-go v1.55.7 github.com/aws/aws-sdk-go v1.55.8
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/bwmarrin/snowflake v0.3.0 github.com/bwmarrin/snowflake v0.3.0
github.com/cenkalti/backoff/v4 v4.3.0 github.com/cenkalti/backoff/v4 v4.3.0
@ -45,6 +45,7 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/jackc/pgx/v5 v5.7.5
github.com/jcmturner/gofork v1.7.6 // indirect github.com/jcmturner/gofork v1.7.6 // indirect
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jinzhu/copier v0.4.0 github.com/jinzhu/copier v0.4.0
@ -54,7 +55,6 @@ require (
github.com/klauspost/compress v1.18.0 // indirect github.com/klauspost/compress v1.18.0 // indirect
github.com/klauspost/reedsolomon v1.12.5 github.com/klauspost/reedsolomon v1.12.5
github.com/kurin/blazer v0.5.3 github.com/kurin/blazer v0.5.3
github.com/lib/pq v1.10.9
github.com/linxGnu/grocksdb v1.10.1 github.com/linxGnu/grocksdb v1.10.1
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-ieproxy v0.0.11 // indirect github.com/mattn/go-ieproxy v0.0.11 // indirect
@ -67,9 +67,9 @@ require (
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/posener/complete v1.2.3 github.com/posener/complete v1.2.3
github.com/pquerna/cachecontrol v0.2.0 github.com/pquerna/cachecontrol v0.2.0
github.com/prometheus/client_golang v1.22.0 github.com/prometheus/client_golang v1.23.0
github.com/prometheus/client_model v0.6.2 // indirect github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.64.0 // indirect github.com/prometheus/common v0.65.0 // indirect
github.com/prometheus/procfs v0.17.0 github.com/prometheus/procfs v0.17.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
@ -99,24 +99,24 @@ require (
gocloud.dev v0.43.0 gocloud.dev v0.43.0
gocloud.dev/pubsub/natspubsub v0.43.0 gocloud.dev/pubsub/natspubsub v0.43.0
gocloud.dev/pubsub/rabbitpubsub v0.43.0 gocloud.dev/pubsub/rabbitpubsub v0.43.0
golang.org/x/crypto v0.40.0 golang.org/x/crypto v0.41.0
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/image v0.29.0 golang.org/x/image v0.30.0
golang.org/x/net v0.42.0 golang.org/x/net v0.43.0
golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect
golang.org/x/sys v0.34.0 golang.org/x/sys v0.35.0
golang.org/x/text v0.27.0 // indirect golang.org/x/text v0.28.0 // indirect
golang.org/x/tools v0.35.0 golang.org/x/tools v0.36.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/api v0.243.0 google.golang.org/api v0.246.0
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 // indirect
google.golang.org/grpc v1.74.2 google.golang.org/grpc v1.74.2
google.golang.org/protobuf v1.36.6 google.golang.org/protobuf v1.36.7
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
modernc.org/b v1.0.0 // indirect modernc.org/b v1.0.0 // indirect
modernc.org/mathutil v1.7.1 modernc.org/mathutil v1.7.1
modernc.org/memory v1.11.0 // indirect modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.38.1 modernc.org/sqlite v1.38.2
modernc.org/strutil v1.2.1 modernc.org/strutil v1.2.1
) )
@ -126,16 +126,16 @@ require (
github.com/a-h/templ v0.3.924 github.com/a-h/templ v0.3.924
github.com/arangodb/go-driver v1.6.6 github.com/arangodb/go-driver v1.6.6
github.com/armon/go-metrics v0.4.1 github.com/armon/go-metrics v0.4.1
github.com/aws/aws-sdk-go-v2 v1.36.6 github.com/aws/aws-sdk-go-v2 v1.37.2
github.com/aws/aws-sdk-go-v2/config v1.29.18 github.com/aws/aws-sdk-go-v2/config v1.30.3
github.com/aws/aws-sdk-go-v2/credentials v1.17.71 github.com/aws/aws-sdk-go-v2/credentials v1.18.3
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1 github.com/aws/aws-sdk-go-v2/service/s3 v1.86.0
github.com/cognusion/imaging v1.0.2 github.com/cognusion/imaging v1.0.2
github.com/fluent/fluent-logger-golang v1.10.0 github.com/fluent/fluent-logger-golang v1.10.0
github.com/getsentry/sentry-go v0.34.1 github.com/getsentry/sentry-go v0.35.0
github.com/gin-contrib/sessions v1.0.4 github.com/gin-contrib/sessions v1.0.4
github.com/gin-gonic/gin v1.10.1 github.com/gin-gonic/gin v1.10.1
github.com/golang-jwt/jwt/v5 v5.2.3 github.com/golang-jwt/jwt/v5 v5.3.0
github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50 github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
github.com/hanwen/go-fuse/v2 v2.8.0 github.com/hanwen/go-fuse/v2 v2.8.0
github.com/hashicorp/raft v1.7.3 github.com/hashicorp/raft v1.7.3
@ -147,13 +147,13 @@ require (
github.com/rabbitmq/amqp091-go v1.10.0 github.com/rabbitmq/amqp091-go v1.10.0
github.com/rclone/rclone v1.70.3 github.com/rclone/rclone v1.70.3
github.com/rdleal/intervalst v1.5.0 github.com/rdleal/intervalst v1.5.0
github.com/redis/go-redis/v9 v9.11.0 github.com/redis/go-redis/v9 v9.12.0
github.com/schollz/progressbar/v3 v3.18.0 github.com/schollz/progressbar/v3 v3.18.0
github.com/shirou/gopsutil/v3 v3.24.5 github.com/shirou/gopsutil/v3 v3.24.5
github.com/tarantool/go-tarantool/v2 v2.4.0 github.com/tarantool/go-tarantool/v2 v2.4.0
github.com/tikv/client-go/v2 v2.0.7 github.com/tikv/client-go/v2 v2.0.7
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
github.com/ydb-platform/ydb-go-sdk/v3 v3.113.2 github.com/ydb-platform/ydb-go-sdk/v3 v3.113.5
go.etcd.io/etcd/client/pkg/v3 v3.6.4 go.etcd.io/etcd/client/pkg/v3 v3.6.4
go.uber.org/atomic v1.11.0 go.uber.org/atomic v1.11.0
golang.org/x/sync v0.16.0 golang.org/x/sync v0.16.0
@ -163,7 +163,11 @@ require (
require github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect require github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 // indirect
require ( require (
cloud.google.com/go/pubsub/v2 v2.0.0 // indirect
github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/lithammer/shortuuid/v3 v3.0.7 // indirect github.com/lithammer/shortuuid/v3 v3.0.7 // indirect
) )
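
The require-block changes above reflect a Postgres driver swap: github.com/lib/pq is dropped further down while github.com/jackc/pgx/v5 comes in, together with its pgpassfile, pgservicefile, and puddle helpers listed just above. As a rough, hypothetical sketch of what code written against the new dependency looks like (not SeaweedFS's actual filer store code; the DSN, table, and column names are only illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"
)

func main() {
	ctx := context.Background()

	// Placeholder DSN; any libpq-style connection string works with pgx.
	pool, err := pgxpool.New(ctx, "postgres://user:pass@localhost:5432/seaweedfs")
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	// pgx keeps the positional $1 placeholders familiar from lib/pq.
	// Table and column names below are illustrative only.
	var count int
	if err := pool.QueryRow(ctx, "SELECT count(*) FROM filemeta WHERE dirhash = $1", 42).Scan(&count); err != nil {
		panic(err)
	}
	fmt.Println("rows:", count)
}
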
@ -202,23 +206,23 @@ require (
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.2 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.2 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 // indirect github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.2 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18 // indirect github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.27.0 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.32.0 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 // indirect github.com/aws/aws-sdk-go-v2/service/sts v1.36.0 // indirect
github.com/aws/smithy-go v1.22.4 // indirect github.com/aws/smithy-go v1.22.5 // indirect
github.com/boltdb/bolt v1.3.1 // indirect github.com/boltdb/bolt v1.3.1 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect github.com/bradenaw/juniper v0.15.3 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
@ -389,10 +393,10 @@ require (
go.uber.org/multierr v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect go.uber.org/zap v1.27.0 // indirect
golang.org/x/arch v0.16.0 // indirect golang.org/x/arch v0.16.0 // indirect
golang.org/x/term v0.33.0 // indirect golang.org/x/term v0.34.0 // indirect
golang.org/x/time v0.12.0 // indirect golang.org/x/time v0.12.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect gopkg.in/validator.v2 v2.0.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum

@ -383,8 +383,10 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp
cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
cloud.google.com/go/pubsub v1.49.0 h1:5054IkbslnrMCgA2MAEPcsN3Ky+AyMpEZcii/DoySPo= cloud.google.com/go/pubsub v1.50.0 h1:hnYpOIxVlgVD1Z8LN7est4DQZK3K6tvZNurZjIVjUe0=
cloud.google.com/go/pubsub v1.49.0/go.mod h1:K1FswTWP+C1tI/nfi3HQecoVeFvL4HUOB1tdaNXKhUY= cloud.google.com/go/pubsub v1.50.0/go.mod h1:Di2Y+nqXBpIS+dXUEJPQzLh8PbIQZMLE9IVUFhf2zmM=
cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0=
cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E=
cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
@ -657,50 +659,50 @@ github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJ
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ=
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk=
github.com/aws/aws-sdk-go-v2 v1.36.6 h1:zJqGjVbRdTPojeCGWn5IR5pbJwSQSBh5RWFTQcEQGdU= github.com/aws/aws-sdk-go-v2 v1.37.2 h1:xkW1iMYawzcmYFYEV0UCMxc8gSsjCGEhBXQkdQywVbo=
github.com/aws/aws-sdk-go-v2 v1.36.6/go.mod h1:EYrzvCCN9CMUTa5+6lf6MM4tq3Zjp8UhSGR/cBsjai0= github.com/aws/aws-sdk-go-v2 v1.37.2/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 h1:12SpdwU8Djs+YGklkinSSlcrPyj3H4VifVsKf78KbwA= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11/go.mod h1:dd+Lkp6YmMryke+qxW/VnKyhMBDTYP41Q2Bb+6gNZgY= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg=
github.com/aws/aws-sdk-go-v2/config v1.29.18 h1:x4T1GRPnqKV8HMJOMtNktbpQMl3bIsfx8KbqmveUO2I= github.com/aws/aws-sdk-go-v2/config v1.30.3 h1:utupeVnE3bmB221W08P0Moz1lDI3OwYa2fBtUhl7TCc=
github.com/aws/aws-sdk-go-v2/config v1.29.18/go.mod h1:bvz8oXugIsH8K7HLhBv06vDqnFv3NsGDt2Znpk7zmOU= github.com/aws/aws-sdk-go-v2/config v1.30.3/go.mod h1:NDGwOEBdpyZwLPlQkpKIO7frf18BW8PaCmAM9iUxQmI=
github.com/aws/aws-sdk-go-v2/credentials v1.17.71 h1:r2w4mQWnrTMJjOyIsZtGp3R3XGY3nqHn8C26C2lQWgA= github.com/aws/aws-sdk-go-v2/credentials v1.18.3 h1:ptfyXmv+ooxzFwyuBth0yqABcjVIkjDL0iTYZBSbum8=
github.com/aws/aws-sdk-go-v2/credentials v1.17.71/go.mod h1:E7VF3acIup4GB5ckzbKFrCK0vTvEQxOxgdq4U3vcMCY= github.com/aws/aws-sdk-go-v2/credentials v1.18.3/go.mod h1:Q43Nci++Wohb0qUh4m54sNln0dbxJw8PvQWkrwOkGOI=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33 h1:D9ixiWSG4lyUBL2DDNK924Px9V/NBVpML90MHqyTADY= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.2 h1:nRniHAvjFJGUCl04F3WaAj7qp/rcz5Gi1OVoj5ErBkc=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.33/go.mod h1:caS/m4DI+cij2paz3rtProRBI4s/+TCiWoaWZuQ9010= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.2/go.mod h1:eJDFKAMHHUvv4a0Zfa7bQb//wFNUXGrbFpYRCHe2kD0=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84 h1:cTXRdLkpBanlDwISl+5chq5ui1d1YWg4PWMR9c3kXyw=
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo= github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.84/go.mod h1:kwSy5X7tfIHN39uucmjQVs2LvDdXEjQucgQQEqCggEo=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37 h1:osMWfm/sC/L4tvEdQ65Gri5ZZDCUpuYJZbTTDrsn4I0= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.2 h1:sPiRHLVUIIQcoVZTNwqQcdtjkqkPopyYmIX0M5ElRf4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.37/go.mod h1:ZV2/1fbjOPr4G4v38G3Ww5TBT4+hmsK45s/rxu1fGy0= github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.2/go.mod h1:ik86P3sgV+Bk7c1tBFCwI3VxMoSEwl4YkRB9xn1s340=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37 h1:v+X21AvTb2wZ+ycg1gx+orkB/9U6L7AOp93R7qYxsxM= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.2 h1:ZdzDAg075H6stMZtbD2o+PyB933M/f20e9WmCBC17wA=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.37/go.mod h1:G0uM1kyssELxmJ2VZEfG0q2npObR3BAkF3c1VsfVnfs= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.2/go.mod h1:eE1IIzXG9sdZCB0pNNpMpsYTLl4YdOQD3njiVN1e/E4=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37 h1:XTZZ0I3SZUHAtBLBU6395ad+VOblE0DwQP6MuaNeics= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.2 h1:sBpc8Ph6CpfZsEdkz/8bfg8WhKlWMCms5iWj6W/AW2U=
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.37/go.mod h1:Pi6ksbniAWVwu2S8pEzcYPyhUkAcLaufxN7PfAUQjBk= github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.2/go.mod h1:Z2lDojZB+92Wo6EKiZZmJid9pPrDJW2NNIXSlaEfVlU=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.5 h1:M5/B8JUaCI8+9QD+u3S/f4YHpvqE9RpSkV3rf0Iks2w= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.2 h1:blV3dY6WbxIVOFggfYIo2E1Q2lZoy5imS7nKgu5m6Tc=
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.5/go.mod h1:Bktzci1bwdbpuLiu3AOksiNPMl/LLKmX1TWmqp2xbvs= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.2/go.mod h1:cBWNeLBjHJRSmXAxdS7mwiMUEgx6zup4wQ9J+/PcsRQ=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18 h1:vvbXsA2TVO80/KT7ZqCbx934dt6PY+vQ8hZpUZ/cpYg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.2 h1:oxmDEO14NBZJbK/M8y3brhMFEIGN4j8a6Aq8eY0sqlo=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.18/go.mod h1:m2JJHledjBGNMsLOF1g9gbAxprzq3KjC8e4lxtn+eWg= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.2/go.mod h1:4hH+8QCrk1uRWDPsVfsNDUup3taAjO8Dnx63au7smAU=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18 h1:OS2e0SKqsU2LiJPqL8u9x41tKc6MMEHrWjLVLn3oysg= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.2 h1:0hBNFAPwecERLzkhhBY+lQKUMpXSKVv4Sxovikrioms=
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.18/go.mod h1:+Yrk+MDGzlNGxCXieljNeWpoZTCQUQVL+Jk9hGGJ8qM= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.2/go.mod h1:Vcnh4KyR4imrrjGN7A2kP2v9y6EPudqoPKXtnmBliPU=
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1 h1:RkHXU9jP0DptGy7qKI8CBGsUJruWz0v5IgwBa2DwWcU= github.com/aws/aws-sdk-go-v2/service/s3 v1.86.0 h1:utPhv4ECQzJIUbtx7vMN4A8uZxlQ5tSt1H1toPI41h8=
github.com/aws/aws-sdk-go-v2/service/s3 v1.84.1/go.mod h1:3xAOf7tdKF+qbb+XpU+EPhNXAdun3Lu1RcDrj8KC24I= github.com/aws/aws-sdk-go-v2/service/s3 v1.86.0/go.mod h1:1/eZYtTWazDgVl96LmGdGktHFi7prAcGCrJ9JGvBITU=
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 h1:OBuZE9Wt8h2imuRktu+WfjiTGrnYdCIJg8IX92aalHE= github.com/aws/aws-sdk-go-v2/service/sns v1.34.7 h1:OBuZE9Wt8h2imuRktu+WfjiTGrnYdCIJg8IX92aalHE=
github.com/aws/aws-sdk-go-v2/service/sns v1.34.7/go.mod h1:4WYoZAhHt+dWYpoOQUgkUKfuQbE6Gg/hW4oXE0pKS9U= github.com/aws/aws-sdk-go-v2/service/sns v1.34.7/go.mod h1:4WYoZAhHt+dWYpoOQUgkUKfuQbE6Gg/hW4oXE0pKS9U=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHfiv5OCRwN/EnCOXQ= github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHfiv5OCRwN/EnCOXQ=
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI= github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6 h1:rGtWqkQbPk7Bkwuv3NzpE/scwwL9sC1Ul3tn9x83DUI= github.com/aws/aws-sdk-go-v2/service/sso v1.27.0 h1:j7/jTOjWeJDolPwZ/J4yZ7dUsxsWZEsxNwH5O7F8eEA=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.6/go.mod h1:u4ku9OLv4TO4bCPdxf4fA1upaMaJmP9ZijGk3AAOC6Q= github.com/aws/aws-sdk-go-v2/service/sso v1.27.0/go.mod h1:M0xdEPQtgpNT7kdAX4/vOAPkFj60hSQRb7TvW9B0iug=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4 h1:OV/pxyXh+eMA0TExHEC4jyWdumLxNbzz1P0zJoezkJc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.32.0 h1:ywQF2N4VjqX+Psw+jLjMmUL2g1RDHlvri3NxHA08MGI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.4/go.mod h1:8Mm5VGYwtm+r305FfPSuc+aFkrypeylGYhFim6XEPoc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.32.0/go.mod h1:Z+qv5Q6b7sWiclvbJyPSOT1BRVU9wfSUPaqQzZ1Xg3E=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1 h1:aUrLQwJfZtwv3/ZNG2xRtEen+NqI3iesuacjP51Mv1s= github.com/aws/aws-sdk-go-v2/service/sts v1.36.0 h1:bRP/a9llXSSgDPk7Rqn5GD/DQCGo6uk95plBFKoXt2M=
github.com/aws/aws-sdk-go-v2/service/sts v1.34.1/go.mod h1:3wFBZKoWnX3r+Sm7in79i54fBmNfwhdNdQuscCw7QIk= github.com/aws/aws-sdk-go-v2/service/sts v1.36.0/go.mod h1:tgBsFzxwl65BWkuJ/x2EUs59bD4SfYKgikvFDJi1S58=
github.com/aws/smithy-go v1.22.4 h1:uqXzVZNuNexwc/xrh6Tb56u89WDlJY6HS+KC0S4QSjw= github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
github.com/aws/smithy-go v1.22.4/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
@ -893,8 +895,8 @@ github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBv
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64= github.com/geoffgarside/ber v1.2.0 h1:/loowoRcs/MWLYmGX9QtIAbA+V/FrnVLsMMPhwiRm64=
github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc= github.com/geoffgarside/ber v1.2.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/getsentry/sentry-go v0.34.1 h1:HSjc1C/OsnZttohEPrrqKH42Iud0HuLCXpv8cU1pWcw= github.com/getsentry/sentry-go v0.35.0 h1:+FJNlnjJsZMG3g0/rmmP7GiKjQoUF5EXfEtBwtPtkzY=
github.com/getsentry/sentry-go v0.34.1/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE= github.com/getsentry/sentry-go v0.35.0/go.mod h1:C55omcY9ChRQIUcVcGcs+Zdy4ZpQGvNJ7JYHIoSWOtE=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sessions v1.0.4 h1:ha6CNdpYiTOK/hTp05miJLbpTSNfOnFg5Jm2kbcqy8U= github.com/gin-contrib/sessions v1.0.4 h1:ha6CNdpYiTOK/hTp05miJLbpTSNfOnFg5Jm2kbcqy8U=
github.com/gin-contrib/sessions v1.0.4/go.mod h1:ccmkrb2z6iU2osiAHZG3x3J4suJK+OU27oqzlWOqQgs= github.com/gin-contrib/sessions v1.0.4/go.mod h1:ccmkrb2z6iU2osiAHZG3x3J4suJK+OU27oqzlWOqQgs=
@ -985,8 +987,8 @@ github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.2.3 h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=
github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@ -1198,6 +1200,14 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs=
github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@ -1291,8 +1301,6 @@ github.com/lanrat/extsort v1.0.2 h1:p3MLVpQEPwEGPzeLBb+1eSErzRl6Bgjgr+qnIs2RxrU=
github.com/lanrat/extsort v1.0.2/go.mod h1:ivzsdLm8Tv+88qbdpMElV6Z15StlzPUtZSKsGb51hnQ= github.com/lanrat/extsort v1.0.2/go.mod h1:ivzsdLm8Tv+88qbdpMElV6Z15StlzPUtZSKsGb51hnQ=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/linxGnu/grocksdb v1.10.1 h1:YX6gUcKvSC3d0s9DaqgbU+CRkZHzlELgHu1Z/kmtslg= github.com/linxGnu/grocksdb v1.10.1 h1:YX6gUcKvSC3d0s9DaqgbU+CRkZHzlELgHu1Z/kmtslg=
github.com/linxGnu/grocksdb v1.10.1/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk= github.com/linxGnu/grocksdb v1.10.1/go.mod h1:C3CNe9UYc9hlEM2pC82AqiGS3LRW537u9LFV4wIZuHk=
github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8= github.com/lithammer/shortuuid/v3 v3.0.7 h1:trX0KTHy4Pbwo/6ia8fscyHoGA+mf1jWbPJVuvyJQQ8=
@ -1452,8 +1460,8 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -1465,8 +1473,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4= github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@ -1486,8 +1494,8 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5X
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rdleal/intervalst v1.5.0 h1:SEB9bCFz5IqD1yhfH1Wv8IBnY/JQxDplwkxHjT6hamU= github.com/rdleal/intervalst v1.5.0 h1:SEB9bCFz5IqD1yhfH1Wv8IBnY/JQxDplwkxHjT6hamU=
github.com/rdleal/intervalst v1.5.0/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ= github.com/rdleal/intervalst v1.5.0/go.mod h1:xO89Z6BC+LQDH+IPQQw/OESt5UADgFD41tYMUINGpxQ=
github.com/redis/go-redis/v9 v9.11.0 h1:E3S08Gl/nJNn5vkxd2i78wZxWAPNZgUNTp8WIJUAiIs= github.com/redis/go-redis/v9 v9.12.0 h1:XlVPGlflh4nxfhsNXPA8Qp6EmEfTo0rp8oaBzPipXnU=
github.com/redis/go-redis/v9 v9.11.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw= github.com/redis/go-redis/v9 v9.12.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo= github.com/redis/rueidis v1.0.19 h1:s65oWtotzlIFN8eMPhyYwxlwLR1lUdhza2KtWprKYSo=
github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo= github.com/redis/rueidis v1.0.19/go.mod h1:8B+r5wdnjwK3lTFml5VtxjzGOQAC+5UmujoD12pDrEo=
github.com/rekby/fixenv v0.3.2/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c= github.com/rekby/fixenv v0.3.2/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c=
@ -1668,8 +1676,8 @@ github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0 h1:/NyPd9KnCJgzrEXCArqk1T
github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0/go.mod h1:9YzkhlIymWaJGX6KMU3vh5sOf3UKbCXkG/ZdjaI3zNM= github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0/go.mod h1:9YzkhlIymWaJGX6KMU3vh5sOf3UKbCXkG/ZdjaI3zNM=
github.com/ydb-platform/ydb-go-sdk/v3 v3.44.0/go.mod h1:oSLwnuilwIpaF5bJJMAofnGgzPJusoI3zWMNb8I+GnM= github.com/ydb-platform/ydb-go-sdk/v3 v3.44.0/go.mod h1:oSLwnuilwIpaF5bJJMAofnGgzPJusoI3zWMNb8I+GnM=
github.com/ydb-platform/ydb-go-sdk/v3 v3.47.3/go.mod h1:bWnOIcUHd7+Sl7DN+yhyY1H/I61z53GczvwJgXMgvj0= github.com/ydb-platform/ydb-go-sdk/v3 v3.47.3/go.mod h1:bWnOIcUHd7+Sl7DN+yhyY1H/I61z53GczvwJgXMgvj0=
github.com/ydb-platform/ydb-go-sdk/v3 v3.113.2 h1:rNURHNc9pU755NCP4e5oIiKfn02mmD9AVKpbdSb4a9g= github.com/ydb-platform/ydb-go-sdk/v3 v3.113.5 h1:olAAZfpMnFYChJNgZJ16G4jqoelRNx7Kx4tW50XcMv0=
github.com/ydb-platform/ydb-go-sdk/v3 v3.113.2/go.mod h1:Pp1w2xxUoLQ3NCNAwV7pvDq0TVQOdtAqs+ZiC+i8r14= github.com/ydb-platform/ydb-go-sdk/v3 v3.113.5/go.mod h1:Pp1w2xxUoLQ3NCNAwV7pvDq0TVQOdtAqs+ZiC+i8r14=
github.com/ydb-platform/ydb-go-yc v0.12.1 h1:qw3Fa+T81+Kpu5Io2vYHJOwcrYrVjgJlT6t/0dOXJrA= github.com/ydb-platform/ydb-go-yc v0.12.1 h1:qw3Fa+T81+Kpu5Io2vYHJOwcrYrVjgJlT6t/0dOXJrA=
github.com/ydb-platform/ydb-go-yc v0.12.1/go.mod h1:t/ZA4ECdgPWjAb4jyDe8AzQZB5dhpGbi3iCahFaNwBY= github.com/ydb-platform/ydb-go-yc v0.12.1/go.mod h1:t/ZA4ECdgPWjAb4jyDe8AzQZB5dhpGbi3iCahFaNwBY=
github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 h1:9E5q8Nsy2RiJMZDNVy0A3KUrIMBPakJ2VgloeWbcI84= github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 h1:9E5q8Nsy2RiJMZDNVy0A3KUrIMBPakJ2VgloeWbcI84=
@ -1697,8 +1705,8 @@ github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtC
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
go.einride.tech/aip v0.68.1 h1:16/AfSxcQISGN5z9C5lM+0mLYXihrHbQ1onvYTr93aQ= go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps=
go.einride.tech/aip v0.68.1/go.mod h1:XaFtaj4HuA3Zwk9xoBtTWgNubZ0ZZXv9BZJCkuKuWbg= go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ=
go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk=
go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk=
go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo= go.etcd.io/etcd/api/v3 v3.6.4 h1:7F6N7toCKcV72QmoUKa23yYLiiljMrT4xCeBL9BmXdo=
@ -1793,8 +1801,8 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1825,8 +1833,8 @@ golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeap
golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.29.0 h1:HcdsyR4Gsuys/Axh0rDEmlBmB68rW1U9BUdB3UVHsas= golang.org/x/image v0.30.0 h1:jD5RhkmVAnjqaCUXfbGBrn3lpxbknfN9w2UhHHU+5B4=
golang.org/x/image v0.29.0/go.mod h1:RVJROnf3SLK8d26OW91j4FrIHGbsJ8QnbEocVTOWQDA= golang.org/x/image v0.30.0/go.mod h1:SAEUTxCCMWSrJcCy/4HwavEsfZZJlYxeHLc6tTiAe/c=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@ -1861,8 +1869,8 @@ golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -1933,8 +1941,8 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -2092,8 +2100,8 @@ golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -2110,8 +2118,8 @@ golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -2132,8 +2140,8 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -2212,8 +2220,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -2287,8 +2295,8 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
google.golang.org/api v0.243.0 h1:sw+ESIJ4BVnlJcWu9S+p2Z6Qq1PjG77T8IJ1xtp4jZQ= google.golang.org/api v0.246.0 h1:H0ODDs5PnMZVZAEtdLMn2Ul2eQi7QNjqM2DIFp8TlTM=
google.golang.org/api v0.243.0/go.mod h1:GE4QtYfaybx1KmeHMdBnNnyLzBZCVihGBXAmJu/uUr8= google.golang.org/api v0.246.0/go.mod h1:dMVhVcylamkirHdzEBAIQWUCgqY885ivNeZYd7VAVr8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -2426,8 +2434,8 @@ google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79 h1:Nt6z9UHqSlIdIGJ
google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79/go.mod h1:kTmlBHMPqR5uCZPBvwa2B18mvubkjyY3CRLI0c6fj0s= google.golang.org/genproto v0.0.0-20250715232539-7130f93afb79/go.mod h1:kTmlBHMPqR5uCZPBvwa2B18mvubkjyY3CRLI0c6fj0s=
google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 h1:mVXdvnmR3S3BQOqHECm9NGMjYiRtEvDYcqAqedTXY6s= google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074 h1:mVXdvnmR3S3BQOqHECm9NGMjYiRtEvDYcqAqedTXY6s=
google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074/go.mod h1:vYFwMYFbmA8vl6Z/krj/h7+U/AqpHknwJX4Uqgfyc7I= google.golang.org/genproto/googleapis/api v0.0.0-20250721164621-a45f3dfb1074/go.mod h1:vYFwMYFbmA8vl6Z/krj/h7+U/AqpHknwJX4Uqgfyc7I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074 h1:qJW29YvkiJmXOYMu5Tf8lyrTp3dOS+K4z6IixtLaCf8= google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0 h1:MAKi5q709QWfnkkpNQ0M12hYJ1+e8qYVDyowc4U1XZM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250721164621-a45f3dfb1074/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/genproto/googleapis/rpc v0.0.0-20250728155136-f173205681a0/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -2491,8 +2499,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -2586,8 +2594,8 @@ modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns=
modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w=
modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE=
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
modernc.org/sqlite v1.38.1 h1:jNnIjleVta+DKSAr3TnkKK87EEhjPhBLzi6hvIX9Bas= modernc.org/sqlite v1.38.2 h1:Aclu7+tgjgcQVShZqim41Bbw9Cho0y/7WzYptXqkEek=
modernc.org/sqlite v1.38.1/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E= modernc.org/sqlite v1.38.2/go.mod h1:cPTJYSlgg3Sfg046yBShXENNtPrWrDX8bsbAQBzgQ5E=
modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=


@ -53,7 +53,7 @@ spec:
{{- $configSecret := (lookup "v1" "Secret" .Release.Namespace .Values.filer.s3.existingConfigSecret) | default dict }} {{- $configSecret := (lookup "v1" "Secret" .Release.Namespace .Values.filer.s3.existingConfigSecret) | default dict }}
checksum/s3config: {{ $configSecret | toYaml | sha256sum }} checksum/s3config: {{ $configSecret | toYaml | sha256sum }}
{{- else }} {{- else }}
checksum/s3config: {{ include (print .Template.BasePath "/s3-secret.yaml") . | sha256sum }} checksum/s3config: {{ include (print .Template.BasePath "/s3/s3-secret.yaml") . | sha256sum }}
{{- end }} {{- end }}
spec: spec:
restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }} restartPolicy: {{ default .Values.global.restartPolicy .Values.filer.restartPolicy }}


@ -1088,7 +1088,6 @@ allInOne:
enabled: false enabled: false
imageOverride: null imageOverride: null
restartPolicy: Always restartPolicy: Always
replicas: 1
# Core configuration # Core configuration
idleTimeout: 30 # Connection idle seconds idleTimeout: 30 # Connection idle seconds


@ -2,6 +2,7 @@ package basic
import ( import (
"fmt" "fmt"
"strings"
"testing" "testing"
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
@ -147,3 +148,81 @@ func TestObjectTaggingWithEncodedValues(t *testing.T) {
Key: aws.String("testDir/testObjectWithEncodedTags"), Key: aws.String("testDir/testObjectWithEncodedTags"),
}) })
} }
// TestObjectUploadWithEncodedTags tests the specific issue reported in GitHub issue #7040
// where tags sent via X-Amz-Tagging header during object upload are not URL decoded properly
func TestObjectUploadWithEncodedTags(t *testing.T) {
// This test specifically addresses the issue where tags with special characters
// (like spaces, colons, slashes) sent during object upload are not URL decoded
// This tests the fix in filer_server_handlers_write_autochunk.go
objectKey := "testDir/testObjectUploadWithTags"
// Upload object with tags that contain special characters that would be URL encoded
// The AWS SDK will automatically URL encode these when sending the X-Amz-Tagging header
// Test edge cases that url.ParseQuery handles better than manual parsing:
// - Values containing "=" characters
// - Empty values
// - Complex special characters
_, err := svc.PutObject(&s3.PutObjectInput{
Bucket: aws.String("theBucket"),
Key: aws.String(objectKey),
Body: aws.ReadSeekCloser(strings.NewReader("test content")),
Tagging: aws.String("Timestamp=2025-07-16 14:40:39&Path=/tmp/file.txt&Description=A test file with spaces&Equation=x=y+1&EmptyValue=&Complex=A%20tag%20with%20%26%20%3D%20chars"),
})
if err != nil {
t.Fatalf("Failed to upload object with tags: %v", err)
}
// Get the tags back to verify they were properly URL decoded during upload
response, err := svc.GetObjectTagging(&s3.GetObjectTaggingInput{
Bucket: aws.String("theBucket"),
Key: aws.String(objectKey),
})
if err != nil {
t.Fatalf("Failed to get object tags: %v", err)
}
// Verify that the tags are properly decoded (not URL encoded)
tagMap := make(map[string]string)
for _, tag := range response.TagSet {
tagMap[*tag.Key] = *tag.Value
}
// Test cases for values that would be URL encoded in the X-Amz-Tagging header
testCases := []struct {
key string
expectedValue string
description string
}{
{"Timestamp", "2025-07-16 14:40:39", "timestamp with spaces and colons"},
{"Path", "/tmp/file.txt", "file path with slashes"},
{"Description", "A test file with spaces", "description with spaces"},
}
for _, tc := range testCases {
actualValue, exists := tagMap[tc.key]
if !exists {
t.Errorf("Expected tag key '%s' not found", tc.key)
continue
}
if actualValue != tc.expectedValue {
t.Errorf("Tag '%s' (%s): expected '%s', got '%s'",
tc.key, tc.description, tc.expectedValue, actualValue)
} else {
fmt.Printf("✓ Tag '%s' correctly decoded: '%s'\n", tc.key, actualValue)
}
}
// Clean up
_, err = svc.DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String("theBucket"),
Key: aws.String(objectKey),
})
if err != nil {
t.Logf("Warning: Failed to clean up test object: %v", err)
}
}


@@ -23,6 +23,10 @@ type AdminData struct {
MessageBrokers []MessageBrokerNode `json:"message_brokers"`
DataCenters []DataCenter `json:"datacenters"`
LastUpdated time.Time `json:"last_updated"`
// EC shard totals for dashboard
TotalEcVolumes int `json:"total_ec_volumes"` // Total number of EC volumes across all servers
TotalEcShards int `json:"total_ec_shards"` // Total number of EC shards across all servers
}
// Object Store Users management structures
@@ -98,6 +102,13 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
return AdminData{}, err
}
// Get volume servers data with EC shard information
volumeServersData, err := s.GetClusterVolumeServers()
if err != nil {
glog.Errorf("Failed to get cluster volume servers: %v", err)
return AdminData{}, err
}
// Get master nodes status
masterNodes := s.getMasterNodesStatus()
@@ -122,6 +133,19 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
// Keep default value on error
}
// Calculate EC shard totals
var totalEcVolumes, totalEcShards int
ecVolumeSet := make(map[uint32]bool) // To avoid counting the same EC volume multiple times
for _, vs := range volumeServersData.VolumeServers {
totalEcShards += vs.EcShards
// Count unique EC volumes across all servers
for _, ecInfo := range vs.EcShardDetails {
ecVolumeSet[ecInfo.VolumeID] = true
}
}
totalEcVolumes = len(ecVolumeSet)
// Prepare admin data
adminData := AdminData{
Username: username,
@@ -130,11 +154,13 @@ func (s *AdminServer) GetAdminData(username string) (AdminData, error) {
TotalSize: topology.TotalSize,
VolumeSizeLimitMB: volumeSizeLimitMB,
MasterNodes: masterNodes,
-VolumeServers: topology.VolumeServers,
+VolumeServers: volumeServersData.VolumeServers,
FilerNodes: filerNodes,
MessageBrokers: messageBrokers,
DataCenters: topology.DataCenters,
LastUpdated: topology.UpdatedAt,
TotalEcVolumes: totalEcVolumes,
TotalEcShards: totalEcShards,
}
return adminData, nil


@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"net/http"
"strconv"
"time" "time"
"github.com/gin-gonic/gin" "github.com/gin-gonic/gin"
@ -878,6 +879,46 @@ func (as *AdminServer) GetMaintenanceTask(c *gin.Context) {
c.JSON(http.StatusOK, task) c.JSON(http.StatusOK, task)
} }
// GetMaintenanceTaskDetailAPI returns detailed task information via API
func (as *AdminServer) GetMaintenanceTaskDetailAPI(c *gin.Context) {
taskID := c.Param("id")
taskDetail, err := as.GetMaintenanceTaskDetail(taskID)
if err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": "Task detail not found", "details": err.Error()})
return
}
c.JSON(http.StatusOK, taskDetail)
}
// ShowMaintenanceTaskDetail renders the task detail page
func (as *AdminServer) ShowMaintenanceTaskDetail(c *gin.Context) {
username := c.GetString("username")
if username == "" {
username = "admin" // Default fallback
}
taskID := c.Param("id")
taskDetail, err := as.GetMaintenanceTaskDetail(taskID)
if err != nil {
c.HTML(http.StatusNotFound, "error.html", gin.H{
"error": "Task not found",
"details": err.Error(),
})
return
}
// Prepare data for template
data := gin.H{
"username": username,
"task": taskDetail.Task,
"taskDetail": taskDetail,
"title": fmt.Sprintf("Task Detail - %s", taskID),
}
c.HTML(http.StatusOK, "task_detail.html", data)
}
// CancelMaintenanceTask cancels a pending maintenance task
func (as *AdminServer) CancelMaintenanceTask(c *gin.Context) {
taskID := c.Param("id")
@@ -1041,27 +1082,65 @@ func (as *AdminServer) getMaintenanceQueueStats() (*maintenance.QueueStats, erro
// getMaintenanceTasks returns all maintenance tasks
func (as *AdminServer) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) {
if as.maintenanceManager == nil {
-return []*MaintenanceTask{}, nil
+return []*maintenance.MaintenanceTask{}, nil
}
-return as.maintenanceManager.GetTasks(maintenance.TaskStatusPending, "", 0), nil
// Collect all tasks from memory across all statuses
allTasks := []*maintenance.MaintenanceTask{}
statuses := []maintenance.MaintenanceTaskStatus{
maintenance.TaskStatusPending,
maintenance.TaskStatusAssigned,
maintenance.TaskStatusInProgress,
maintenance.TaskStatusCompleted,
maintenance.TaskStatusFailed,
maintenance.TaskStatusCancelled,
}
for _, status := range statuses {
tasks := as.maintenanceManager.GetTasks(status, "", 0)
allTasks = append(allTasks, tasks...)
}
// Also load any persisted tasks that might not be in memory
if as.configPersistence != nil {
persistedTasks, err := as.configPersistence.LoadAllTaskStates()
if err == nil {
// Add any persisted tasks not already in memory
for _, persistedTask := range persistedTasks {
found := false
for _, memoryTask := range allTasks {
if memoryTask.ID == persistedTask.ID {
found = true
break
}
}
if !found {
allTasks = append(allTasks, persistedTask)
}
}
}
}
return allTasks, nil
}
// getMaintenanceTask returns a specific maintenance task
-func (as *AdminServer) getMaintenanceTask(taskID string) (*MaintenanceTask, error) {
+func (as *AdminServer) getMaintenanceTask(taskID string) (*maintenance.MaintenanceTask, error) {
if as.maintenanceManager == nil {
return nil, fmt.Errorf("maintenance manager not initialized")
}
// Search for the task across all statuses since we don't know which status it has
-statuses := []MaintenanceTaskStatus{
-TaskStatusPending,
-TaskStatusAssigned,
-TaskStatusInProgress,
-TaskStatusCompleted,
-TaskStatusFailed,
-TaskStatusCancelled,
+statuses := []maintenance.MaintenanceTaskStatus{
+maintenance.TaskStatusPending,
+maintenance.TaskStatusAssigned,
+maintenance.TaskStatusInProgress,
+maintenance.TaskStatusCompleted,
+maintenance.TaskStatusFailed,
+maintenance.TaskStatusCancelled,
}
// First, search for the task in memory across all statuses
for _, status := range statuses {
tasks := as.maintenanceManager.GetTasks(status, "", 0) // Get all tasks with this status
for _, task := range tasks {
@@ -1071,9 +1150,133 @@ func (as *AdminServer) getMaintenanceTask(taskID string) (*MaintenanceTask, erro
}
}
// If not found in memory, try to load from persistent storage
if as.configPersistence != nil {
task, err := as.configPersistence.LoadTaskState(taskID)
if err == nil {
glog.V(2).Infof("Loaded task %s from persistent storage", taskID)
return task, nil
}
glog.V(2).Infof("Task %s not found in persistent storage: %v", taskID, err)
}
return nil, fmt.Errorf("task %s not found", taskID) return nil, fmt.Errorf("task %s not found", taskID)
} }
// GetMaintenanceTaskDetail returns comprehensive task details including logs and assignment history
func (as *AdminServer) GetMaintenanceTaskDetail(taskID string) (*maintenance.TaskDetailData, error) {
// Get basic task information
task, err := as.getMaintenanceTask(taskID)
if err != nil {
return nil, err
}
// Create task detail structure from the loaded task
taskDetail := &maintenance.TaskDetailData{
Task: task,
AssignmentHistory: task.AssignmentHistory, // Use assignment history from persisted task
ExecutionLogs: []*maintenance.TaskExecutionLog{},
RelatedTasks: []*maintenance.MaintenanceTask{},
LastUpdated: time.Now(),
}
if taskDetail.AssignmentHistory == nil {
taskDetail.AssignmentHistory = []*maintenance.TaskAssignmentRecord{}
}
// Get worker information if task is assigned
if task.WorkerID != "" {
workers := as.maintenanceManager.GetWorkers()
for _, worker := range workers {
if worker.ID == task.WorkerID {
taskDetail.WorkerInfo = worker
break
}
}
}
// Get execution logs from worker if task is active/completed and worker is connected
if task.Status == maintenance.TaskStatusInProgress || task.Status == maintenance.TaskStatusCompleted {
if as.workerGrpcServer != nil && task.WorkerID != "" {
workerLogs, err := as.workerGrpcServer.RequestTaskLogs(task.WorkerID, taskID, 100, "")
if err == nil && len(workerLogs) > 0 {
// Convert worker logs to maintenance logs
for _, workerLog := range workerLogs {
maintenanceLog := &maintenance.TaskExecutionLog{
Timestamp: time.Unix(workerLog.Timestamp, 0),
Level: workerLog.Level,
Message: workerLog.Message,
Source: "worker",
TaskID: taskID,
WorkerID: task.WorkerID,
}
// carry structured fields if present
if len(workerLog.Fields) > 0 {
maintenanceLog.Fields = make(map[string]string, len(workerLog.Fields))
for k, v := range workerLog.Fields {
maintenanceLog.Fields[k] = v
}
}
// carry optional progress/status
if workerLog.Progress != 0 {
p := float64(workerLog.Progress)
maintenanceLog.Progress = &p
}
if workerLog.Status != "" {
maintenanceLog.Status = workerLog.Status
}
taskDetail.ExecutionLogs = append(taskDetail.ExecutionLogs, maintenanceLog)
}
} else if err != nil {
// Add a diagnostic log entry when worker logs cannot be retrieved
diagnosticLog := &maintenance.TaskExecutionLog{
Timestamp: time.Now(),
Level: "WARNING",
Message: fmt.Sprintf("Failed to retrieve worker logs: %v", err),
Source: "admin",
TaskID: taskID,
WorkerID: task.WorkerID,
}
taskDetail.ExecutionLogs = append(taskDetail.ExecutionLogs, diagnosticLog)
glog.V(1).Infof("Failed to get worker logs for task %s from worker %s: %v", taskID, task.WorkerID, err)
}
} else {
// Add diagnostic information when worker is not available
reason := "worker gRPC server not available"
if task.WorkerID == "" {
reason = "no worker assigned to task"
}
diagnosticLog := &maintenance.TaskExecutionLog{
Timestamp: time.Now(),
Level: "INFO",
Message: fmt.Sprintf("Worker logs not available: %s", reason),
Source: "admin",
TaskID: taskID,
WorkerID: task.WorkerID,
}
taskDetail.ExecutionLogs = append(taskDetail.ExecutionLogs, diagnosticLog)
}
}
// Get related tasks (other tasks on same volume/server)
if task.VolumeID != 0 || task.Server != "" {
allTasks := as.maintenanceManager.GetTasks("", "", 50) // Get recent tasks
for _, relatedTask := range allTasks {
if relatedTask.ID != taskID &&
(relatedTask.VolumeID == task.VolumeID || relatedTask.Server == task.Server) {
taskDetail.RelatedTasks = append(taskDetail.RelatedTasks, relatedTask)
}
}
}
// Save updated task detail to disk
if err := as.configPersistence.SaveTaskDetail(taskID, taskDetail); err != nil {
glog.V(1).Infof("Failed to save task detail for %s: %v", taskID, err)
}
return taskDetail, nil
}
// getMaintenanceWorkers returns all maintenance workers
func (as *AdminServer) getMaintenanceWorkers() ([]*maintenance.MaintenanceWorker, error) {
if as.maintenanceManager == nil {
@@ -1157,6 +1360,34 @@ func (as *AdminServer) getMaintenanceWorkerDetails(workerID string) (*WorkerDeta
}, nil
}
// GetWorkerLogs fetches logs from a specific worker for a task
func (as *AdminServer) GetWorkerLogs(c *gin.Context) {
workerID := c.Param("id")
taskID := c.Query("taskId")
maxEntriesStr := c.DefaultQuery("maxEntries", "100")
logLevel := c.DefaultQuery("logLevel", "")
maxEntries := int32(100)
if maxEntriesStr != "" {
if parsed, err := strconv.ParseInt(maxEntriesStr, 10, 32); err == nil {
maxEntries = int32(parsed)
}
}
if as.workerGrpcServer == nil {
c.JSON(http.StatusServiceUnavailable, gin.H{"error": "Worker gRPC server not available"})
return
}
logs, err := as.workerGrpcServer.RequestTaskLogs(workerID, taskID, maxEntries, logLevel)
if err != nil {
c.JSON(http.StatusBadGateway, gin.H{"error": fmt.Sprintf("Failed to get logs from worker: %v", err)})
return
}
c.JSON(http.StatusOK, gin.H{"worker_id": workerID, "task_id": taskID, "logs": logs, "count": len(logs)})
}
// getMaintenanceStats returns maintenance statistics
func (as *AdminServer) getMaintenanceStats() (*MaintenanceStats, error) {
if as.maintenanceManager == nil {
@@ -1376,6 +1607,20 @@ func (s *AdminServer) GetWorkerGrpcServer() *WorkerGrpcServer {
// InitMaintenanceManager initializes the maintenance manager
func (s *AdminServer) InitMaintenanceManager(config *maintenance.MaintenanceConfig) {
s.maintenanceManager = maintenance.NewMaintenanceManager(s, config)
// Set up task persistence if config persistence is available
if s.configPersistence != nil {
queue := s.maintenanceManager.GetQueue()
if queue != nil {
queue.SetPersistence(s.configPersistence)
// Load tasks from persistence on startup
if err := queue.LoadTasksFromPersistence(); err != nil {
glog.Errorf("Failed to load tasks from persistence: %v", err)
}
}
}
glog.V(1).Infof("Maintenance manager initialized (enabled: %v)", config.Enabled) glog.V(1).Infof("Maintenance manager initialized (enabled: %v)", config.Enabled)
} }


@@ -76,6 +76,13 @@ func (s *AdminServer) getTopologyViaGRPC(topology *ClusterTopology) error {
totalSize += int64(volInfo.Size)
totalFiles += int64(volInfo.FileCount)
}
// Sum up EC shard sizes
for _, ecShardInfo := range diskInfo.EcShardInfos {
for _, shardSize := range ecShardInfo.ShardSizes {
totalSize += shardSize
}
}
}
vs := VolumeServer{


@@ -1,11 +1,15 @@
package dash
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/maintenance"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks/balance"
@@ -33,6 +37,12 @@ const (
BalanceTaskConfigJSONFile = "task_balance.json"
ReplicationTaskConfigJSONFile = "task_replication.json"
// Task persistence subdirectories and settings
TasksSubdir = "tasks"
TaskDetailsSubdir = "task_details"
TaskLogsSubdir = "task_logs"
MaxCompletedTasks = 10 // Only keep last 10 completed tasks
ConfigDirPermissions = 0755
ConfigFilePermissions = 0644
)
@@ -45,6 +55,35 @@ type (
ReplicationTaskConfig = worker_pb.ReplicationTaskConfig
)
// isValidTaskID validates that a task ID is safe for use in file paths
// This prevents path traversal attacks by ensuring the task ID doesn't contain
// path separators or parent directory references
func isValidTaskID(taskID string) bool {
if taskID == "" {
return false
}
// Reject task IDs with leading or trailing whitespace
if strings.TrimSpace(taskID) != taskID {
return false
}
// Check for path traversal patterns
if strings.Contains(taskID, "/") ||
strings.Contains(taskID, "\\") ||
strings.Contains(taskID, "..") ||
strings.Contains(taskID, ":") {
return false
}
// Additional safety: ensure it's not just dots or empty after trim
if taskID == "." || taskID == ".." {
return false
}
return true
}
// ConfigPersistence handles saving and loading configuration files
type ConfigPersistence struct {
dataDir string
@@ -688,3 +727,509 @@ func buildPolicyFromTaskConfigs() *worker_pb.MaintenancePolicy {
glog.V(1).Infof("Built maintenance policy from separate task configs - %d task policies loaded", len(policy.TaskPolicies))
return policy
}
// SaveTaskDetail saves detailed task information to disk
func (cp *ConfigPersistence) SaveTaskDetail(taskID string, detail *maintenance.TaskDetailData) error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot save task detail")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
taskDetailDir := filepath.Join(cp.dataDir, TaskDetailsSubdir)
if err := os.MkdirAll(taskDetailDir, ConfigDirPermissions); err != nil {
return fmt.Errorf("failed to create task details directory: %w", err)
}
// Save task detail as JSON for easy reading and debugging
taskDetailPath := filepath.Join(taskDetailDir, fmt.Sprintf("%s.json", taskID))
jsonData, err := json.MarshalIndent(detail, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal task detail to JSON: %w", err)
}
if err := os.WriteFile(taskDetailPath, jsonData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to write task detail file: %w", err)
}
glog.V(2).Infof("Saved task detail for task %s to %s", taskID, taskDetailPath)
return nil
}
// LoadTaskDetail loads detailed task information from disk
func (cp *ConfigPersistence) LoadTaskDetail(taskID string) (*maintenance.TaskDetailData, error) {
if cp.dataDir == "" {
return nil, fmt.Errorf("no data directory specified, cannot load task detail")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return nil, fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
taskDetailPath := filepath.Join(cp.dataDir, TaskDetailsSubdir, fmt.Sprintf("%s.json", taskID))
if _, err := os.Stat(taskDetailPath); os.IsNotExist(err) {
return nil, fmt.Errorf("task detail file not found: %s", taskID)
}
jsonData, err := os.ReadFile(taskDetailPath)
if err != nil {
return nil, fmt.Errorf("failed to read task detail file: %w", err)
}
var detail maintenance.TaskDetailData
if err := json.Unmarshal(jsonData, &detail); err != nil {
return nil, fmt.Errorf("failed to unmarshal task detail JSON: %w", err)
}
glog.V(2).Infof("Loaded task detail for task %s from %s", taskID, taskDetailPath)
return &detail, nil
}
// SaveTaskExecutionLogs saves execution logs for a task
func (cp *ConfigPersistence) SaveTaskExecutionLogs(taskID string, logs []*maintenance.TaskExecutionLog) error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot save task logs")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
taskLogsDir := filepath.Join(cp.dataDir, TaskLogsSubdir)
if err := os.MkdirAll(taskLogsDir, ConfigDirPermissions); err != nil {
return fmt.Errorf("failed to create task logs directory: %w", err)
}
// Save logs as JSON for easy reading
taskLogsPath := filepath.Join(taskLogsDir, fmt.Sprintf("%s.json", taskID))
logsData := struct {
TaskID string `json:"task_id"`
Logs []*maintenance.TaskExecutionLog `json:"logs"`
}{
TaskID: taskID,
Logs: logs,
}
jsonData, err := json.MarshalIndent(logsData, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal task logs to JSON: %w", err)
}
if err := os.WriteFile(taskLogsPath, jsonData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to write task logs file: %w", err)
}
glog.V(2).Infof("Saved %d execution logs for task %s to %s", len(logs), taskID, taskLogsPath)
return nil
}
// LoadTaskExecutionLogs loads execution logs for a task
func (cp *ConfigPersistence) LoadTaskExecutionLogs(taskID string) ([]*maintenance.TaskExecutionLog, error) {
if cp.dataDir == "" {
return nil, fmt.Errorf("no data directory specified, cannot load task logs")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return nil, fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
taskLogsPath := filepath.Join(cp.dataDir, TaskLogsSubdir, fmt.Sprintf("%s.json", taskID))
if _, err := os.Stat(taskLogsPath); os.IsNotExist(err) {
// Return empty slice if logs don't exist yet
return []*maintenance.TaskExecutionLog{}, nil
}
jsonData, err := os.ReadFile(taskLogsPath)
if err != nil {
return nil, fmt.Errorf("failed to read task logs file: %w", err)
}
var logsData struct {
TaskID string `json:"task_id"`
Logs []*maintenance.TaskExecutionLog `json:"logs"`
}
if err := json.Unmarshal(jsonData, &logsData); err != nil {
return nil, fmt.Errorf("failed to unmarshal task logs JSON: %w", err)
}
glog.V(2).Infof("Loaded %d execution logs for task %s from %s", len(logsData.Logs), taskID, taskLogsPath)
return logsData.Logs, nil
}
// DeleteTaskDetail removes task detail and logs from disk
func (cp *ConfigPersistence) DeleteTaskDetail(taskID string) error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot delete task detail")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
// Delete task detail file
taskDetailPath := filepath.Join(cp.dataDir, TaskDetailsSubdir, fmt.Sprintf("%s.json", taskID))
if err := os.Remove(taskDetailPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete task detail file: %w", err)
}
// Delete task logs file
taskLogsPath := filepath.Join(cp.dataDir, TaskLogsSubdir, fmt.Sprintf("%s.json", taskID))
if err := os.Remove(taskLogsPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete task logs file: %w", err)
}
glog.V(2).Infof("Deleted task detail and logs for task %s", taskID)
return nil
}
// ListTaskDetails returns a list of all task IDs that have stored details
func (cp *ConfigPersistence) ListTaskDetails() ([]string, error) {
if cp.dataDir == "" {
return nil, fmt.Errorf("no data directory specified, cannot list task details")
}
taskDetailDir := filepath.Join(cp.dataDir, TaskDetailsSubdir)
if _, err := os.Stat(taskDetailDir); os.IsNotExist(err) {
return []string{}, nil
}
entries, err := os.ReadDir(taskDetailDir)
if err != nil {
return nil, fmt.Errorf("failed to read task details directory: %w", err)
}
var taskIDs []string
for _, entry := range entries {
if !entry.IsDir() && filepath.Ext(entry.Name()) == ".json" {
taskID := entry.Name()[:len(entry.Name())-5] // Remove .json extension
taskIDs = append(taskIDs, taskID)
}
}
return taskIDs, nil
}
// CleanupCompletedTasks removes old completed tasks beyond the retention limit
func (cp *ConfigPersistence) CleanupCompletedTasks() error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot cleanup completed tasks")
}
tasksDir := filepath.Join(cp.dataDir, TasksSubdir)
if _, err := os.Stat(tasksDir); os.IsNotExist(err) {
return nil // No tasks directory, nothing to cleanup
}
// Load all tasks and find completed/failed ones
allTasks, err := cp.LoadAllTaskStates()
if err != nil {
return fmt.Errorf("failed to load tasks for cleanup: %w", err)
}
// Filter completed and failed tasks, sort by completion time
var completedTasks []*maintenance.MaintenanceTask
for _, task := range allTasks {
if (task.Status == maintenance.TaskStatusCompleted || task.Status == maintenance.TaskStatusFailed) && task.CompletedAt != nil {
completedTasks = append(completedTasks, task)
}
}
// Sort by completion time (most recent first)
sort.Slice(completedTasks, func(i, j int) bool {
return completedTasks[i].CompletedAt.After(*completedTasks[j].CompletedAt)
})
// Keep only the most recent MaxCompletedTasks, delete the rest
if len(completedTasks) > MaxCompletedTasks {
tasksToDelete := completedTasks[MaxCompletedTasks:]
for _, task := range tasksToDelete {
if err := cp.DeleteTaskState(task.ID); err != nil {
glog.Warningf("Failed to delete old completed task %s: %v", task.ID, err)
} else {
glog.V(2).Infof("Cleaned up old completed task %s (completed: %v)", task.ID, task.CompletedAt)
}
}
glog.V(1).Infof("Cleaned up %d old completed tasks (keeping %d most recent)", len(tasksToDelete), MaxCompletedTasks)
}
return nil
}
// SaveTaskState saves a task state to protobuf file
func (cp *ConfigPersistence) SaveTaskState(task *maintenance.MaintenanceTask) error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot save task state")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(task.ID) {
return fmt.Errorf("invalid task ID: %q contains illegal path characters", task.ID)
}
tasksDir := filepath.Join(cp.dataDir, TasksSubdir)
if err := os.MkdirAll(tasksDir, ConfigDirPermissions); err != nil {
return fmt.Errorf("failed to create tasks directory: %w", err)
}
taskFilePath := filepath.Join(tasksDir, fmt.Sprintf("%s.pb", task.ID))
// Convert task to protobuf
pbTask := cp.maintenanceTaskToProtobuf(task)
taskStateFile := &worker_pb.TaskStateFile{
Task: pbTask,
LastUpdated: time.Now().Unix(),
AdminVersion: "unknown", // TODO: add version info
}
pbData, err := proto.Marshal(taskStateFile)
if err != nil {
return fmt.Errorf("failed to marshal task state protobuf: %w", err)
}
if err := os.WriteFile(taskFilePath, pbData, ConfigFilePermissions); err != nil {
return fmt.Errorf("failed to write task state file: %w", err)
}
glog.V(2).Infof("Saved task state for task %s to %s", task.ID, taskFilePath)
return nil
}
// LoadTaskState loads a task state from protobuf file
func (cp *ConfigPersistence) LoadTaskState(taskID string) (*maintenance.MaintenanceTask, error) {
if cp.dataDir == "" {
return nil, fmt.Errorf("no data directory specified, cannot load task state")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return nil, fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
taskFilePath := filepath.Join(cp.dataDir, TasksSubdir, fmt.Sprintf("%s.pb", taskID))
if _, err := os.Stat(taskFilePath); os.IsNotExist(err) {
return nil, fmt.Errorf("task state file not found: %s", taskID)
}
pbData, err := os.ReadFile(taskFilePath)
if err != nil {
return nil, fmt.Errorf("failed to read task state file: %w", err)
}
var taskStateFile worker_pb.TaskStateFile
if err := proto.Unmarshal(pbData, &taskStateFile); err != nil {
return nil, fmt.Errorf("failed to unmarshal task state protobuf: %w", err)
}
// Convert protobuf to maintenance task
task := cp.protobufToMaintenanceTask(taskStateFile.Task)
glog.V(2).Infof("Loaded task state for task %s from %s", taskID, taskFilePath)
return task, nil
}
// LoadAllTaskStates loads all task states from disk
func (cp *ConfigPersistence) LoadAllTaskStates() ([]*maintenance.MaintenanceTask, error) {
if cp.dataDir == "" {
return []*maintenance.MaintenanceTask{}, nil
}
tasksDir := filepath.Join(cp.dataDir, TasksSubdir)
if _, err := os.Stat(tasksDir); os.IsNotExist(err) {
return []*maintenance.MaintenanceTask{}, nil
}
entries, err := os.ReadDir(tasksDir)
if err != nil {
return nil, fmt.Errorf("failed to read tasks directory: %w", err)
}
var tasks []*maintenance.MaintenanceTask
for _, entry := range entries {
if !entry.IsDir() && filepath.Ext(entry.Name()) == ".pb" {
taskID := entry.Name()[:len(entry.Name())-3] // Remove .pb extension
task, err := cp.LoadTaskState(taskID)
if err != nil {
glog.Warningf("Failed to load task state for %s: %v", taskID, err)
continue
}
tasks = append(tasks, task)
}
}
glog.V(1).Infof("Loaded %d task states from disk", len(tasks))
return tasks, nil
}
// DeleteTaskState removes a task state file from disk
func (cp *ConfigPersistence) DeleteTaskState(taskID string) error {
if cp.dataDir == "" {
return fmt.Errorf("no data directory specified, cannot delete task state")
}
// Validate task ID to prevent path traversal
if !isValidTaskID(taskID) {
return fmt.Errorf("invalid task ID: %q contains illegal path characters", taskID)
}
taskFilePath := filepath.Join(cp.dataDir, TasksSubdir, fmt.Sprintf("%s.pb", taskID))
if err := os.Remove(taskFilePath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete task state file: %w", err)
}
glog.V(2).Infof("Deleted task state for task %s", taskID)
return nil
}
// maintenanceTaskToProtobuf converts a MaintenanceTask to protobuf format
func (cp *ConfigPersistence) maintenanceTaskToProtobuf(task *maintenance.MaintenanceTask) *worker_pb.MaintenanceTaskData {
pbTask := &worker_pb.MaintenanceTaskData{
Id: task.ID,
Type: string(task.Type),
Priority: cp.priorityToString(task.Priority),
Status: string(task.Status),
VolumeId: task.VolumeID,
Server: task.Server,
Collection: task.Collection,
Reason: task.Reason,
CreatedAt: task.CreatedAt.Unix(),
ScheduledAt: task.ScheduledAt.Unix(),
WorkerId: task.WorkerID,
Error: task.Error,
Progress: task.Progress,
RetryCount: int32(task.RetryCount),
MaxRetries: int32(task.MaxRetries),
CreatedBy: task.CreatedBy,
CreationContext: task.CreationContext,
DetailedReason: task.DetailedReason,
Tags: task.Tags,
}
// Handle optional timestamps
if task.StartedAt != nil {
pbTask.StartedAt = task.StartedAt.Unix()
}
if task.CompletedAt != nil {
pbTask.CompletedAt = task.CompletedAt.Unix()
}
// Convert assignment history
if task.AssignmentHistory != nil {
for _, record := range task.AssignmentHistory {
pbRecord := &worker_pb.TaskAssignmentRecord{
WorkerId: record.WorkerID,
WorkerAddress: record.WorkerAddress,
AssignedAt: record.AssignedAt.Unix(),
Reason: record.Reason,
}
if record.UnassignedAt != nil {
pbRecord.UnassignedAt = record.UnassignedAt.Unix()
}
pbTask.AssignmentHistory = append(pbTask.AssignmentHistory, pbRecord)
}
}
// Convert typed parameters if available
if task.TypedParams != nil {
pbTask.TypedParams = task.TypedParams
}
return pbTask
}
// protobufToMaintenanceTask converts protobuf format to MaintenanceTask
func (cp *ConfigPersistence) protobufToMaintenanceTask(pbTask *worker_pb.MaintenanceTaskData) *maintenance.MaintenanceTask {
task := &maintenance.MaintenanceTask{
ID: pbTask.Id,
Type: maintenance.MaintenanceTaskType(pbTask.Type),
Priority: cp.stringToPriority(pbTask.Priority),
Status: maintenance.MaintenanceTaskStatus(pbTask.Status),
VolumeID: pbTask.VolumeId,
Server: pbTask.Server,
Collection: pbTask.Collection,
Reason: pbTask.Reason,
CreatedAt: time.Unix(pbTask.CreatedAt, 0),
ScheduledAt: time.Unix(pbTask.ScheduledAt, 0),
WorkerID: pbTask.WorkerId,
Error: pbTask.Error,
Progress: pbTask.Progress,
RetryCount: int(pbTask.RetryCount),
MaxRetries: int(pbTask.MaxRetries),
CreatedBy: pbTask.CreatedBy,
CreationContext: pbTask.CreationContext,
DetailedReason: pbTask.DetailedReason,
Tags: pbTask.Tags,
}
// Handle optional timestamps
if pbTask.StartedAt > 0 {
startTime := time.Unix(pbTask.StartedAt, 0)
task.StartedAt = &startTime
}
if pbTask.CompletedAt > 0 {
completedTime := time.Unix(pbTask.CompletedAt, 0)
task.CompletedAt = &completedTime
}
// Convert assignment history
if pbTask.AssignmentHistory != nil {
task.AssignmentHistory = make([]*maintenance.TaskAssignmentRecord, 0, len(pbTask.AssignmentHistory))
for _, pbRecord := range pbTask.AssignmentHistory {
record := &maintenance.TaskAssignmentRecord{
WorkerID: pbRecord.WorkerId,
WorkerAddress: pbRecord.WorkerAddress,
AssignedAt: time.Unix(pbRecord.AssignedAt, 0),
Reason: pbRecord.Reason,
}
if pbRecord.UnassignedAt > 0 {
unassignedTime := time.Unix(pbRecord.UnassignedAt, 0)
record.UnassignedAt = &unassignedTime
}
task.AssignmentHistory = append(task.AssignmentHistory, record)
}
}
// Convert typed parameters if available
if pbTask.TypedParams != nil {
task.TypedParams = pbTask.TypedParams
}
return task
}
// priorityToString converts MaintenanceTaskPriority to string for protobuf storage
func (cp *ConfigPersistence) priorityToString(priority maintenance.MaintenanceTaskPriority) string {
switch priority {
case maintenance.PriorityLow:
return "low"
case maintenance.PriorityNormal:
return "normal"
case maintenance.PriorityHigh:
return "high"
case maintenance.PriorityCritical:
return "critical"
default:
return "normal"
}
}
// stringToPriority converts string from protobuf to MaintenanceTaskPriority
func (cp *ConfigPersistence) stringToPriority(priorityStr string) maintenance.MaintenanceTaskPriority {
switch priorityStr {
case "low":
return maintenance.PriorityLow
case "normal":
return maintenance.PriorityNormal
case "high":
return maintenance.PriorityHigh
case "critical":
return maintenance.PriorityCritical
default:
return maintenance.PriorityNormal
}
}


@@ -13,6 +13,17 @@ import (
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)
// matchesCollection checks if a volume/EC volume collection matches the filter collection.
// Handles the special case where empty collection ("") represents the "default" collection.
func matchesCollection(volumeCollection, filterCollection string) bool {
// Both empty means default collection matches default filter
if volumeCollection == "" && filterCollection == "" {
return true
}
// Direct string match for named collections
return volumeCollection == filterCollection
}
// GetClusterEcShards retrieves cluster EC shards data with pagination, sorting, and filtering
func (s *AdminServer) GetClusterEcShards(page int, pageSize int, sortBy string, sortOrder string, collection string) (*ClusterEcShardsData, error) {
// Set defaults
@@ -403,7 +414,7 @@ func (s *AdminServer) GetClusterEcVolumes(page int, pageSize int, sortBy string,
var ecVolumes []EcVolumeWithShards
for _, volume := range volumeData {
// Filter by collection if specified
-if collection == "" || volume.Collection == collection {
+if collection == "" || matchesCollection(volume.Collection, collection) {
ecVolumes = append(ecVolumes, *volume)
}
}


@@ -44,6 +44,22 @@ type VolumeServer struct {
DiskUsage int64 `json:"disk_usage"`
DiskCapacity int64 `json:"disk_capacity"`
LastHeartbeat time.Time `json:"last_heartbeat"`
// EC shard information
EcVolumes int `json:"ec_volumes"` // Number of EC volumes this server has shards for
EcShards int `json:"ec_shards"` // Total number of EC shards on this server
EcShardDetails []VolumeServerEcInfo `json:"ec_shard_details"` // Detailed EC shard information
}
// VolumeServerEcInfo represents EC shard information for a specific volume on a server
type VolumeServerEcInfo struct {
VolumeID uint32 `json:"volume_id"`
Collection string `json:"collection"`
ShardCount int `json:"shard_count"` // Number of shards this server has for this volume
EcIndexBits uint32 `json:"ec_index_bits"` // Bitmap of which shards this server has
ShardNumbers []int `json:"shard_numbers"` // List of actual shard numbers this server has
ShardSizes map[int]int64 `json:"shard_sizes"` // Map from shard number to size in bytes
TotalSize int64 `json:"total_size"` // Total size of all shards on this server for this volume
}
// S3 Bucket management structures


@@ -7,6 +7,7 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)
// GetClusterVolumes retrieves cluster volumes data with pagination, sorting, and filtering
@@ -26,6 +27,7 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
}
var volumes []VolumeWithTopology
var totalSize int64
var cachedTopologyInfo *master_pb.TopologyInfo
// Get detailed volume information via gRPC
err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
@@ -34,11 +36,15 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
return err
}
// Cache the topology info for reuse
cachedTopologyInfo = resp.TopologyInfo
if resp.TopologyInfo != nil {
for _, dc := range resp.TopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
for _, diskInfo := range node.DiskInfos {
// Process regular volumes
for _, volInfo := range diskInfo.VolumeInfos {
volume := VolumeWithTopology{
VolumeInformationMessage: volInfo,
@@ -49,6 +55,14 @@ func (s *AdminServer) GetClusterVolumes(page int, pageSize int, sortBy string, s
volumes = append(volumes, volume)
totalSize += int64(volInfo.Size)
}
// Process EC shards in the same loop
for _, ecShardInfo := range diskInfo.EcShardInfos {
// Add all shard sizes for this EC volume
for _, shardSize := range ecShardInfo.ShardSizes {
totalSize += shardSize
}
}
}
}
}
@@ -66,20 +80,38 @@
if collection != "" {
var filteredVolumes []VolumeWithTopology
var filteredTotalSize int64
-for _, volume := range volumes {
-// Handle "default" collection filtering for empty collections
-volumeCollection := volume.Collection
-if volumeCollection == "" {
-volumeCollection = "default"
-}
-if volumeCollection == collection {
+var filteredEcTotalSize int64
+for _, volume := range volumes {
+if matchesCollection(volume.Collection, collection) {
filteredVolumes = append(filteredVolumes, volume)
filteredTotalSize += int64(volume.Size)
}
}
// Filter EC shard sizes by collection using already processed data
// This reuses the topology traversal done above (lines 43-71) to avoid a second pass
if cachedTopologyInfo != nil {
for _, dc := range cachedTopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
for _, diskInfo := range node.DiskInfos {
for _, ecShardInfo := range diskInfo.EcShardInfos {
if matchesCollection(ecShardInfo.Collection, collection) {
// Add all shard sizes for this EC volume
for _, shardSize := range ecShardInfo.ShardSizes {
filteredEcTotalSize += shardSize
}
}
}
}
}
}
}
}
volumes = filteredVolumes
-totalSize = filteredTotalSize
+totalSize = filteredTotalSize + filteredEcTotalSize
}
// Calculate unique data center, rack, disk type, collection, and version counts from filtered volumes
@@ -370,23 +402,151 @@ func (s *AdminServer) VacuumVolume(volumeID int, server string) error {
})
}
-// GetClusterVolumeServers retrieves cluster volume servers data
+// GetClusterVolumeServers retrieves cluster volume servers data including EC shard information
func (s *AdminServer) GetClusterVolumeServers() (*ClusterVolumeServersData, error) {
-topology, err := s.GetClusterTopology()
+var volumeServerMap map[string]*VolumeServer
// Make only ONE VolumeList call and use it for both topology building AND EC shard processing
err := s.WithMasterClient(func(client master_pb.SeaweedClient) error {
resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
if err != nil {
return err
}
// Get volume size limit from response, default to 30GB if not set
volumeSizeLimitMB := resp.VolumeSizeLimitMb
if volumeSizeLimitMB == 0 {
volumeSizeLimitMB = 30000 // default to 30000MB (30GB)
}
// Build basic topology from the VolumeList response (replaces GetClusterTopology call)
volumeServerMap = make(map[string]*VolumeServer)
if resp.TopologyInfo != nil {
// Process topology to build basic volume server info (similar to cluster_topology.go logic)
for _, dc := range resp.TopologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
// Initialize volume server if not exists
if volumeServerMap[node.Id] == nil {
volumeServerMap[node.Id] = &VolumeServer{
Address: node.Id,
DataCenter: dc.Id,
Rack: rack.Id,
Volumes: 0,
DiskUsage: 0,
DiskCapacity: 0,
EcVolumes: 0,
EcShards: 0,
EcShardDetails: []VolumeServerEcInfo{},
}
}
vs := volumeServerMap[node.Id]
// Process EC shard information for this server at volume server level (not per-disk)
ecVolumeMap := make(map[uint32]*VolumeServerEcInfo)
// Temporary map to accumulate shard info across disks
ecShardAccumulator := make(map[uint32][]*master_pb.VolumeEcShardInformationMessage)
// Process disk information
for _, diskInfo := range node.DiskInfos {
vs.DiskCapacity += int64(diskInfo.MaxVolumeCount) * int64(volumeSizeLimitMB) * 1024 * 1024 // Use actual volume size limit
// Count regular volumes and calculate disk usage
for _, volInfo := range diskInfo.VolumeInfos {
vs.Volumes++
vs.DiskUsage += int64(volInfo.Size)
}
// Accumulate EC shard information across all disks for this volume server
for _, ecShardInfo := range diskInfo.EcShardInfos {
volumeId := ecShardInfo.Id
ecShardAccumulator[volumeId] = append(ecShardAccumulator[volumeId], ecShardInfo)
}
}
// Process accumulated EC shard information per volume
for volumeId, ecShardInfos := range ecShardAccumulator {
if len(ecShardInfos) == 0 {
continue
}
// Initialize EC volume info
ecInfo := &VolumeServerEcInfo{
VolumeID: volumeId,
Collection: ecShardInfos[0].Collection,
ShardCount: 0,
EcIndexBits: 0,
ShardNumbers: []int{},
ShardSizes: make(map[int]int64),
TotalSize: 0,
}
// Merge EcIndexBits from all disks and collect shard sizes
allShardSizes := make(map[erasure_coding.ShardId]int64)
for _, ecShardInfo := range ecShardInfos {
ecInfo.EcIndexBits |= ecShardInfo.EcIndexBits
// Collect shard sizes from this disk
shardBits := erasure_coding.ShardBits(ecShardInfo.EcIndexBits)
shardBits.EachSetIndex(func(shardId erasure_coding.ShardId) {
if size, found := erasure_coding.GetShardSize(ecShardInfo, shardId); found {
allShardSizes[shardId] = size
}
})
}
// Process final merged shard information
finalShardBits := erasure_coding.ShardBits(ecInfo.EcIndexBits)
finalShardBits.EachSetIndex(func(shardId erasure_coding.ShardId) {
ecInfo.ShardCount++
ecInfo.ShardNumbers = append(ecInfo.ShardNumbers, int(shardId))
vs.EcShards++
// Add shard size if available
if shardSize, exists := allShardSizes[shardId]; exists {
ecInfo.ShardSizes[int(shardId)] = shardSize
ecInfo.TotalSize += shardSize
vs.DiskUsage += shardSize // Add EC shard size to total disk usage
}
})
ecVolumeMap[volumeId] = ecInfo
}
// Convert EC volume map to slice and update volume server (after processing all disks)
for _, ecInfo := range ecVolumeMap {
vs.EcShardDetails = append(vs.EcShardDetails, *ecInfo)
vs.EcVolumes++
}
}
}
}
}
return nil
})
if err != nil {
return nil, err
}
// Convert map back to slice
var volumeServers []VolumeServer
for _, vs := range volumeServerMap {
volumeServers = append(volumeServers, *vs)
}
var totalCapacity int64
var totalVolumes int
-for _, vs := range topology.VolumeServers {
+for _, vs := range volumeServers {
totalCapacity += vs.DiskCapacity
totalVolumes += vs.Volumes
}
return &ClusterVolumeServersData{
-VolumeServers: topology.VolumeServers,
-TotalVolumeServers: len(topology.VolumeServers),
+VolumeServers: volumeServers,
+TotalVolumeServers: len(volumeServers),
TotalVolumes: totalVolumes,
TotalCapacity: totalCapacity,
LastUpdated: time.Now(),


@@ -26,6 +26,10 @@ type WorkerGrpcServer struct {
connections map[string]*WorkerConnection
connMutex sync.RWMutex
// Log request correlation
pendingLogRequests map[string]*LogRequestContext
logRequestsMutex sync.RWMutex
// gRPC server
grpcServer *grpc.Server
listener net.Listener
@@ -33,6 +37,14 @@ type WorkerGrpcServer struct {
stopChan chan struct{}
}
// LogRequestContext tracks pending log requests
type LogRequestContext struct {
TaskID string
WorkerID string
ResponseCh chan *worker_pb.TaskLogResponse
Timeout time.Time
}
// WorkerConnection represents an active worker connection
type WorkerConnection struct {
workerID string
@@ -49,9 +61,10 @@ type WorkerConnection struct {
// NewWorkerGrpcServer creates a new gRPC server for worker connections
func NewWorkerGrpcServer(adminServer *AdminServer) *WorkerGrpcServer {
return &WorkerGrpcServer{
adminServer: adminServer,
connections: make(map[string]*WorkerConnection),
pendingLogRequests: make(map[string]*LogRequestContext),
stopChan: make(chan struct{}),
}
}
@@ -264,6 +277,9 @@ func (s *WorkerGrpcServer) handleWorkerMessage(conn *WorkerConnection, msg *work
case *worker_pb.WorkerMessage_TaskComplete:
s.handleTaskCompletion(conn, m.TaskComplete)
case *worker_pb.WorkerMessage_TaskLogResponse:
s.handleTaskLogResponse(conn, m.TaskLogResponse)
case *worker_pb.WorkerMessage_Shutdown:
glog.Infof("Worker %s shutting down: %s", workerID, m.Shutdown.Reason)
s.unregisterWorker(workerID)
@@ -341,8 +357,13 @@ func (s *WorkerGrpcServer) handleTaskRequest(conn *WorkerConnection, request *wo
// Create basic params if none exist
taskParams = &worker_pb.TaskParams{
VolumeId: task.VolumeID,
-Server: task.Server,
Collection: task.Collection,
Sources: []*worker_pb.TaskSource{
{
Node: task.Server,
VolumeId: task.VolumeID,
},
},
}
}
@@ -396,6 +417,35 @@ func (s *WorkerGrpcServer) handleTaskCompletion(conn *WorkerConnection, completi
}
}
// handleTaskLogResponse processes task log responses from workers
func (s *WorkerGrpcServer) handleTaskLogResponse(conn *WorkerConnection, response *worker_pb.TaskLogResponse) {
requestKey := fmt.Sprintf("%s:%s", response.WorkerId, response.TaskId)
s.logRequestsMutex.RLock()
requestContext, exists := s.pendingLogRequests[requestKey]
s.logRequestsMutex.RUnlock()
if !exists {
glog.Warningf("Received unexpected log response for task %s from worker %s", response.TaskId, response.WorkerId)
return
}
glog.V(1).Infof("Received log response for task %s from worker %s: %d entries", response.TaskId, response.WorkerId, len(response.LogEntries))
// Send response to waiting channel
select {
case requestContext.ResponseCh <- response:
// Response delivered successfully
case <-time.After(time.Second):
glog.Warningf("Failed to deliver log response for task %s from worker %s: timeout", response.TaskId, response.WorkerId)
}
// Clean up the pending request
s.logRequestsMutex.Lock()
delete(s.pendingLogRequests, requestKey)
s.logRequestsMutex.Unlock()
}
// unregisterWorker removes a worker connection
func (s *WorkerGrpcServer) unregisterWorker(workerID string) {
s.connMutex.Lock()
@@ -453,6 +503,112 @@ func (s *WorkerGrpcServer) GetConnectedWorkers() []string {
return workers
}
// RequestTaskLogs requests execution logs from a worker for a specific task
func (s *WorkerGrpcServer) RequestTaskLogs(workerID, taskID string, maxEntries int32, logLevel string) ([]*worker_pb.TaskLogEntry, error) {
s.connMutex.RLock()
conn, exists := s.connections[workerID]
s.connMutex.RUnlock()
if !exists {
return nil, fmt.Errorf("worker %s is not connected", workerID)
}
// Create response channel for this request
responseCh := make(chan *worker_pb.TaskLogResponse, 1)
requestKey := fmt.Sprintf("%s:%s", workerID, taskID)
// Register pending request
requestContext := &LogRequestContext{
TaskID: taskID,
WorkerID: workerID,
ResponseCh: responseCh,
Timeout: time.Now().Add(10 * time.Second),
}
s.logRequestsMutex.Lock()
s.pendingLogRequests[requestKey] = requestContext
s.logRequestsMutex.Unlock()
// Create log request message
logRequest := &worker_pb.AdminMessage{
AdminId: "admin-server",
Timestamp: time.Now().Unix(),
Message: &worker_pb.AdminMessage_TaskLogRequest{
TaskLogRequest: &worker_pb.TaskLogRequest{
TaskId: taskID,
WorkerId: workerID,
IncludeMetadata: true,
MaxEntries: maxEntries,
LogLevel: logLevel,
},
},
}
// Send the request through the worker's outgoing channel
select {
case conn.outgoing <- logRequest:
glog.V(1).Infof("Log request sent to worker %s for task %s", workerID, taskID)
case <-time.After(5 * time.Second):
// Clean up pending request on timeout
s.logRequestsMutex.Lock()
delete(s.pendingLogRequests, requestKey)
s.logRequestsMutex.Unlock()
return nil, fmt.Errorf("timeout sending log request to worker %s", workerID)
}
// Wait for response
select {
case response := <-responseCh:
if !response.Success {
return nil, fmt.Errorf("worker log request failed: %s", response.ErrorMessage)
}
glog.V(1).Infof("Received %d log entries for task %s from worker %s", len(response.LogEntries), taskID, workerID)
return response.LogEntries, nil
case <-time.After(10 * time.Second):
// Clean up pending request on timeout
s.logRequestsMutex.Lock()
delete(s.pendingLogRequests, requestKey)
s.logRequestsMutex.Unlock()
return nil, fmt.Errorf("timeout waiting for log response from worker %s", workerID)
}
}
// RequestTaskLogsFromAllWorkers requests logs for a task from all connected workers
func (s *WorkerGrpcServer) RequestTaskLogsFromAllWorkers(taskID string, maxEntries int32, logLevel string) (map[string][]*worker_pb.TaskLogEntry, error) {
s.connMutex.RLock()
workerIDs := make([]string, 0, len(s.connections))
for workerID := range s.connections {
workerIDs = append(workerIDs, workerID)
}
s.connMutex.RUnlock()
results := make(map[string][]*worker_pb.TaskLogEntry)
for _, workerID := range workerIDs {
logs, err := s.RequestTaskLogs(workerID, taskID, maxEntries, logLevel)
if err != nil {
glog.V(1).Infof("Failed to get logs from worker %s for task %s: %v", workerID, taskID, err)
// Store empty result with error information for debugging
results[workerID+"_error"] = []*worker_pb.TaskLogEntry{
{
Timestamp: time.Now().Unix(),
Level: "ERROR",
Message: fmt.Sprintf("Failed to retrieve logs from worker %s: %v", workerID, err),
Fields: map[string]string{"source": "admin"},
},
}
continue
}
if len(logs) > 0 {
results[workerID] = logs
} else {
glog.V(2).Infof("No logs found for task %s on worker %s", taskID, workerID)
}
}
return results, nil
}
// convertTaskParameters converts task parameters to protobuf format
func convertTaskParameters(params map[string]interface{}) map[string]string {
result := make(map[string]string)


@@ -94,6 +94,7 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
protected.POST("/maintenance/config", h.maintenanceHandlers.UpdateMaintenanceConfig)
protected.GET("/maintenance/config/:taskType", h.maintenanceHandlers.ShowTaskConfig)
protected.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig)
+protected.GET("/maintenance/tasks/:id", h.maintenanceHandlers.ShowTaskDetail)
// API routes for AJAX calls
api := r.Group("/api")
@@ -164,9 +165,11 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
maintenanceApi.POST("/scan", h.adminServer.TriggerMaintenanceScan)
maintenanceApi.GET("/tasks", h.adminServer.GetMaintenanceTasks)
maintenanceApi.GET("/tasks/:id", h.adminServer.GetMaintenanceTask)
+maintenanceApi.GET("/tasks/:id/detail", h.adminServer.GetMaintenanceTaskDetailAPI)
maintenanceApi.POST("/tasks/:id/cancel", h.adminServer.CancelMaintenanceTask)
maintenanceApi.GET("/workers", h.adminServer.GetMaintenanceWorkersAPI)
maintenanceApi.GET("/workers/:id", h.adminServer.GetMaintenanceWorker)
+maintenanceApi.GET("/workers/:id/logs", h.adminServer.GetWorkerLogs)
maintenanceApi.GET("/stats", h.adminServer.GetMaintenanceStats)
maintenanceApi.GET("/config", h.adminServer.GetMaintenanceConfigAPI)
maintenanceApi.PUT("/config", h.adminServer.UpdateMaintenanceConfigAPI)
@@ -218,6 +221,7 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
r.POST("/maintenance/config", h.maintenanceHandlers.UpdateMaintenanceConfig)
r.GET("/maintenance/config/:taskType", h.maintenanceHandlers.ShowTaskConfig)
r.POST("/maintenance/config/:taskType", h.maintenanceHandlers.UpdateTaskConfig)
+r.GET("/maintenance/tasks/:id", h.maintenanceHandlers.ShowTaskDetail)
// API routes for AJAX calls
api := r.Group("/api")
@@ -287,9 +291,11 @@ func (h *AdminHandlers) SetupRoutes(r *gin.Engine, authRequired bool, username,
maintenanceApi.POST("/scan", h.adminServer.TriggerMaintenanceScan)
maintenanceApi.GET("/tasks", h.adminServer.GetMaintenanceTasks)
maintenanceApi.GET("/tasks/:id", h.adminServer.GetMaintenanceTask)
+maintenanceApi.GET("/tasks/:id/detail", h.adminServer.GetMaintenanceTaskDetailAPI)
maintenanceApi.POST("/tasks/:id/cancel", h.adminServer.CancelMaintenanceTask)
maintenanceApi.GET("/workers", h.adminServer.GetMaintenanceWorkersAPI)
maintenanceApi.GET("/workers/:id", h.adminServer.GetMaintenanceWorker)
+maintenanceApi.GET("/workers/:id/logs", h.adminServer.GetWorkerLogs)
maintenanceApi.GET("/stats", h.adminServer.GetMaintenanceStats)
maintenanceApi.GET("/config", h.adminServer.GetMaintenanceConfigAPI)
maintenanceApi.PUT("/config", h.adminServer.UpdateMaintenanceConfigAPI)
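The route blocks above register a new task-detail page (/maintenance/tasks/:id) together with JSON endpoints for task detail and worker logs. A minimal client sketch is shown below; the URL paths come from the registrations above, while the admin address, the plain net/http client, and unauthenticated access are illustrative assumptions only.

// Hypothetical client-side sketch (not part of this change).
package main

import (
	"fmt"
	"io"
	"net/http"
)

// fetchJSON issues a GET against the admin server and returns the raw response body.
func fetchJSON(adminAddr, path string) ([]byte, error) {
	resp, err := http.Get("http://" + adminAddr + path)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}

func main() {
	// adminAddr and the task/worker IDs below are placeholders.
	detail, err := fetchJSON("localhost:23646", "/api/maintenance/tasks/task-123/detail")
	if err != nil {
		fmt.Println("task detail request failed:", err)
		return
	}
	fmt.Println(string(detail))

	logs, err := fetchJSON("localhost:23646", "/api/maintenance/workers/worker-1/logs")
	if err != nil {
		fmt.Println("worker logs request failed:", err)
		return
	}
	fmt.Println(string(logs))
}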

View file

@@ -169,6 +169,12 @@ func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) {
return
}
+// Map "default" collection to empty string for backend filtering
+actualCollectionName := collectionName
+if collectionName == "default" {
+actualCollectionName = ""
+}
// Parse query parameters
page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
pageSize, _ := strconv.Atoi(c.DefaultQuery("page_size", "25"))
@@ -176,7 +182,7 @@ func (h *ClusterHandlers) ShowCollectionDetails(c *gin.Context) {
sortOrder := c.DefaultQuery("sort_order", "asc")
// Get collection details data (volumes and EC volumes)
-collectionDetailsData, err := h.adminServer.GetCollectionDetails(collectionName, page, pageSize, sortBy, sortOrder)
+collectionDetailsData, err := h.adminServer.GetCollectionDetails(actualCollectionName, page, pageSize, sortBy, sortOrder)
if err != nil {
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to get collection details: " + err.Error()})
return

View file

@@ -1,6 +1,7 @@
package handlers
import (
+"context"
"fmt"
"net/http"
"reflect"
@@ -34,35 +35,82 @@ func NewMaintenanceHandlers(adminServer *dash.AdminServer) *MaintenanceHandlers
}
}
-// ShowMaintenanceQueue displays the maintenance queue page
-func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) {
-data, err := h.getMaintenanceQueueData()
+// ShowTaskDetail displays the task detail page
+func (h *MaintenanceHandlers) ShowTaskDetail(c *gin.Context) {
+taskID := c.Param("id")
+glog.Infof("DEBUG ShowTaskDetail: Starting for task ID: %s", taskID)
+taskDetail, err := h.adminServer.GetMaintenanceTaskDetail(taskID)
if err != nil {
-glog.Infof("DEBUG ShowMaintenanceQueue: error getting data: %v", err)
-c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
+glog.Errorf("DEBUG ShowTaskDetail: error getting task detail for %s: %v", taskID, err)
+c.String(http.StatusNotFound, "Task not found: %s (Error: %v)", taskID, err)
return
}
-glog.Infof("DEBUG ShowMaintenanceQueue: got data with %d tasks", len(data.Tasks))
-if data.Stats != nil {
-glog.Infof("DEBUG ShowMaintenanceQueue: stats = {pending: %d, running: %d, completed: %d}",
-data.Stats.PendingTasks, data.Stats.RunningTasks, data.Stats.CompletedToday)
-} else {
-glog.Infof("DEBUG ShowMaintenanceQueue: stats is nil")
-}
-// Render HTML template
+glog.Infof("DEBUG ShowTaskDetail: got task detail for %s, task type: %s, status: %s", taskID, taskDetail.Task.Type, taskDetail.Task.Status)
c.Header("Content-Type", "text/html")
-maintenanceComponent := app.MaintenanceQueue(data)
-layoutComponent := layout.Layout(c, maintenanceComponent)
+taskDetailComponent := app.TaskDetail(taskDetail)
+layoutComponent := layout.Layout(c, taskDetailComponent)
err = layoutComponent.Render(c.Request.Context(), c.Writer)
if err != nil {
-glog.Infof("DEBUG ShowMaintenanceQueue: render error: %v", err)
-c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
+glog.Errorf("DEBUG ShowTaskDetail: render error: %v", err)
+c.String(http.StatusInternalServerError, "Failed to render template: %v", err)
return
}
-glog.Infof("DEBUG ShowMaintenanceQueue: template rendered successfully")
+glog.Infof("DEBUG ShowTaskDetail: template rendered successfully for task %s", taskID)
}
// ShowMaintenanceQueue displays the maintenance queue page
func (h *MaintenanceHandlers) ShowMaintenanceQueue(c *gin.Context) {
// Add timeout to prevent hanging
ctx, cancel := context.WithTimeout(c.Request.Context(), 30*time.Second)
defer cancel()
// Use a channel to handle timeout for data retrieval
type result struct {
data *maintenance.MaintenanceQueueData
err error
}
resultChan := make(chan result, 1)
go func() {
data, err := h.getMaintenanceQueueData()
resultChan <- result{data: data, err: err}
}()
select {
case res := <-resultChan:
if res.err != nil {
glog.V(1).Infof("ShowMaintenanceQueue: error getting data: %v", res.err)
c.JSON(http.StatusInternalServerError, gin.H{"error": res.err.Error()})
return
}
glog.V(2).Infof("ShowMaintenanceQueue: got data with %d tasks", len(res.data.Tasks))
// Render HTML template
c.Header("Content-Type", "text/html")
maintenanceComponent := app.MaintenanceQueue(res.data)
layoutComponent := layout.Layout(c, maintenanceComponent)
err := layoutComponent.Render(ctx, c.Writer)
if err != nil {
glog.V(1).Infof("ShowMaintenanceQueue: render error: %v", err)
c.JSON(http.StatusInternalServerError, gin.H{"error": "Failed to render template: " + err.Error()})
return
}
glog.V(3).Infof("ShowMaintenanceQueue: template rendered successfully")
case <-ctx.Done():
glog.Warningf("ShowMaintenanceQueue: timeout waiting for data")
c.JSON(http.StatusRequestTimeout, gin.H{
"error": "Request timeout - maintenance data retrieval took too long. This may indicate a system issue.",
"suggestion": "Try refreshing the page or contact system administrator if the problem persists.",
})
return
}
}
// ShowMaintenanceWorkers displays the maintenance workers page
@@ -479,7 +527,7 @@ func (h *MaintenanceHandlers) getMaintenanceQueueStats() (*maintenance.QueueStat
}
func (h *MaintenanceHandlers) getMaintenanceTasks() ([]*maintenance.MaintenanceTask, error) {
-// Call the maintenance manager directly to get all tasks
+// Call the maintenance manager directly to get recent tasks (limit for performance)
if h.adminServer == nil {
return []*maintenance.MaintenanceTask{}, nil
}
@@ -489,8 +537,9 @@ func (h *MaintenanceHandlers) getMaintenanceTasks() ([]*maintenance.MaintenanceT
return []*maintenance.MaintenanceTask{}, nil
}
-// Get ALL tasks using empty parameters - this should match what the API returns
-allTasks := manager.GetTasks("", "", 0)
+// Get recent tasks only (last 100) to prevent slow page loads
+// Users can view more tasks via pagination if needed
+allTasks := manager.GetTasks("", "", 100)
return allTasks, nil
}

View file

@@ -1,20 +1,13 @@
package maintenance
import (
-"context"
-"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/admin/topology"
"github.com/seaweedfs/seaweedfs/weed/glog"
-"github.com/seaweedfs/seaweedfs/weed/operation"
-"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
-"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
"github.com/seaweedfs/seaweedfs/weed/worker/tasks"
"github.com/seaweedfs/seaweedfs/weed/worker/types"
-"google.golang.org/grpc"
-"google.golang.org/grpc/credentials/insecure"
)
// MaintenanceIntegration bridges the task system with existing maintenance
@@ -225,8 +218,9 @@ func (s *MaintenanceIntegration) ScanWithTaskDetectors(volumeMetrics []*types.Vo
// Create cluster info
clusterInfo := &types.ClusterInfo{
TotalVolumes: len(filteredMetrics),
LastUpdated: time.Now(),
+ActiveTopology: s.activeTopology, // Provide ActiveTopology for destination planning
}
// Run detection for each registered task type
@@ -250,8 +244,12 @@ func (s *MaintenanceIntegration) ScanWithTaskDetectors(volumeMetrics []*types.Vo
// Double-check for conflicts with pending operations
opType := s.mapMaintenanceTaskTypeToPendingOperationType(existingResult.TaskType)
if !s.pendingOperations.WouldConflictWithPending(existingResult.VolumeID, opType) {
-// Plan destination for operations that need it
-s.planDestinationForTask(existingResult, opType)
+// All task types should now have TypedParams populated during detection phase
+if existingResult.TypedParams == nil {
+glog.Warningf("Task %s for volume %d has no typed parameters - skipping (task parameter creation may have failed)",
+existingResult.TaskType, existingResult.VolumeID)
+continue
+}
allResults = append(allResults, existingResult)
} else {
glog.V(2).Infof("Skipping task %s for volume %d due to conflict with pending operation",
@@ -342,7 +340,7 @@ func (s *MaintenanceIntegration) CanScheduleWithTaskSchedulers(task *Maintenance
}
// convertTaskToTaskSystem converts existing task to task system format using dynamic mapping
-func (s *MaintenanceIntegration) convertTaskToTaskSystem(task *MaintenanceTask) *types.Task {
+func (s *MaintenanceIntegration) convertTaskToTaskSystem(task *MaintenanceTask) *types.TaskInput {
// Convert task type using mapping
taskType, exists := s.revTaskTypeMap[task.Type]
if !exists {
@@ -358,7 +356,7 @@ func (s *MaintenanceIntegration) convertTaskToTaskSystem(task *MaintenanceTask)
priority = types.TaskPriorityNormal
}
-return &types.Task{
+return &types.TaskInput{
ID: task.ID,
Type: taskType,
Priority: priority,
@@ -371,8 +369,8 @@ func (s *MaintenanceIntegration) convertTaskToTaskSystem(task *MaintenanceTask)
}
// convertTasksToTaskSystem converts multiple tasks
-func (s *MaintenanceIntegration) convertTasksToTaskSystem(tasks []*MaintenanceTask) []*types.Task {
-var result []*types.Task
+func (s *MaintenanceIntegration) convertTasksToTaskSystem(tasks []*MaintenanceTask) []*types.TaskInput {
+var result []*types.TaskInput
for _, task := range tasks {
converted := s.convertTaskToTaskSystem(task)
if converted != nil {
@@ -383,8 +381,8 @@ func (s *MaintenanceIntegration) convertTasksToTaskSystem(tasks []*MaintenanceTa
}
// convertWorkersToTaskSystem converts workers to task system format using dynamic mapping
-func (s *MaintenanceIntegration) convertWorkersToTaskSystem(workers []*MaintenanceWorker) []*types.Worker {
-var result []*types.Worker
+func (s *MaintenanceIntegration) convertWorkersToTaskSystem(workers []*MaintenanceWorker) []*types.WorkerData {
+var result []*types.WorkerData
for _, worker := range workers {
capabilities := make([]types.TaskType, 0, len(worker.Capabilities))
for _, cap := range worker.Capabilities {
@@ -397,7 +395,7 @@ func (s *MaintenanceIntegration) convertWorkersToTaskSystem(workers []*Maintenan
}
}
-result = append(result, &types.Worker{
+result = append(result, &types.WorkerData{
ID: worker.ID,
Address: worker.Address,
Capabilities: capabilities,
@@ -489,436 +487,3 @@ func (s *MaintenanceIntegration) GetPendingOperations() *PendingOperations {
func (s *MaintenanceIntegration) GetActiveTopology() *topology.ActiveTopology {
return s.activeTopology
}
// planDestinationForTask plans the destination for a task that requires it and creates typed protobuf parameters
func (s *MaintenanceIntegration) planDestinationForTask(task *TaskDetectionResult, opType PendingOperationType) {
// Only plan destinations for operations that move volumes/shards
if opType == OpTypeVacuum {
// For vacuum tasks, create VacuumTaskParams
s.createVacuumTaskParams(task)
return
}
glog.V(1).Infof("Planning destination for %s task on volume %d (server: %s)", task.TaskType, task.VolumeID, task.Server)
// Use ActiveTopology for destination planning
destinationPlan, err := s.planDestinationWithActiveTopology(task, opType)
if err != nil {
glog.Warningf("Failed to plan primary destination for %s task volume %d: %v",
task.TaskType, task.VolumeID, err)
// Don't return here - still try to create task params which might work with multiple destinations
}
// Create typed protobuf parameters based on operation type
switch opType {
case OpTypeErasureCoding:
if destinationPlan == nil {
glog.Warningf("Cannot create EC task for volume %d: destination planning failed", task.VolumeID)
return
}
s.createErasureCodingTaskParams(task, destinationPlan)
case OpTypeVolumeMove, OpTypeVolumeBalance:
if destinationPlan == nil {
glog.Warningf("Cannot create balance task for volume %d: destination planning failed", task.VolumeID)
return
}
s.createBalanceTaskParams(task, destinationPlan.(*topology.DestinationPlan))
case OpTypeReplication:
if destinationPlan == nil {
glog.Warningf("Cannot create replication task for volume %d: destination planning failed", task.VolumeID)
return
}
s.createReplicationTaskParams(task, destinationPlan.(*topology.DestinationPlan))
default:
glog.V(2).Infof("Unknown operation type for task %s: %v", task.TaskType, opType)
}
if destinationPlan != nil {
switch plan := destinationPlan.(type) {
case *topology.DestinationPlan:
glog.V(1).Infof("Completed destination planning for %s task on volume %d: %s -> %s",
task.TaskType, task.VolumeID, task.Server, plan.TargetNode)
case *topology.MultiDestinationPlan:
glog.V(1).Infof("Completed EC destination planning for volume %d: %s -> %d destinations (racks: %d, DCs: %d)",
task.VolumeID, task.Server, len(plan.Plans), plan.SuccessfulRack, plan.SuccessfulDCs)
}
} else {
glog.V(1).Infof("Completed destination planning for %s task on volume %d: no destination planned",
task.TaskType, task.VolumeID)
}
}
// createVacuumTaskParams creates typed parameters for vacuum tasks
func (s *MaintenanceIntegration) createVacuumTaskParams(task *TaskDetectionResult) {
// Get configuration from policy instead of using hard-coded values
vacuumConfig := GetVacuumTaskConfig(s.maintenancePolicy, MaintenanceTaskType("vacuum"))
// Use configured values or defaults if config is not available
garbageThreshold := 0.3 // Default 30%
verifyChecksum := true // Default to verify
batchSize := int32(1000) // Default batch size
workingDir := "/tmp/seaweedfs_vacuum_work" // Default working directory
if vacuumConfig != nil {
garbageThreshold = vacuumConfig.GarbageThreshold
// Note: VacuumTaskConfig has GarbageThreshold, MinVolumeAgeHours, MinIntervalSeconds
// Other fields like VerifyChecksum, BatchSize, WorkingDir would need to be added
// to the protobuf definition if they should be configurable
}
// Create typed protobuf parameters
task.TypedParams = &worker_pb.TaskParams{
VolumeId: task.VolumeID,
Server: task.Server,
Collection: task.Collection,
TaskParams: &worker_pb.TaskParams_VacuumParams{
VacuumParams: &worker_pb.VacuumTaskParams{
GarbageThreshold: garbageThreshold,
ForceVacuum: false,
BatchSize: batchSize,
WorkingDir: workingDir,
VerifyChecksum: verifyChecksum,
},
},
}
}
// planDestinationWithActiveTopology uses ActiveTopology to plan destinations
func (s *MaintenanceIntegration) planDestinationWithActiveTopology(task *TaskDetectionResult, opType PendingOperationType) (interface{}, error) {
// Get source node information from topology
var sourceRack, sourceDC string
// Extract rack and DC from topology info
topologyInfo := s.activeTopology.GetTopologyInfo()
if topologyInfo != nil {
for _, dc := range topologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, dataNodeInfo := range rack.DataNodeInfos {
if dataNodeInfo.Id == task.Server {
sourceDC = dc.Id
sourceRack = rack.Id
break
}
}
if sourceRack != "" {
break
}
}
if sourceDC != "" {
break
}
}
}
switch opType {
case OpTypeVolumeBalance, OpTypeVolumeMove:
// Plan single destination for balance operation
return s.activeTopology.PlanBalanceDestination(task.VolumeID, task.Server, sourceRack, sourceDC, 0)
case OpTypeErasureCoding:
// Plan multiple destinations for EC operation using adaptive shard counts
// Start with the default configuration, but fall back to smaller configurations if insufficient disks
totalShards := s.getOptimalECShardCount()
multiPlan, err := s.activeTopology.PlanECDestinations(task.VolumeID, task.Server, sourceRack, sourceDC, totalShards)
if err != nil {
return nil, err
}
if multiPlan != nil && len(multiPlan.Plans) > 0 {
// Return the multi-destination plan for EC
return multiPlan, nil
}
return nil, fmt.Errorf("no EC destinations found")
default:
return nil, fmt.Errorf("unsupported operation type for destination planning: %v", opType)
}
}
// createErasureCodingTaskParams creates typed parameters for EC tasks
func (s *MaintenanceIntegration) createErasureCodingTaskParams(task *TaskDetectionResult, destinationPlan interface{}) {
// Determine EC shard counts based on the number of planned destinations
multiPlan, ok := destinationPlan.(*topology.MultiDestinationPlan)
if !ok {
glog.Warningf("EC task for volume %d received unexpected destination plan type", task.VolumeID)
task.TypedParams = nil
return
}
// Use adaptive shard configuration based on actual planned destinations
totalShards := len(multiPlan.Plans)
dataShards, parityShards := s.getECShardCounts(totalShards)
// Extract disk-aware destinations from the multi-destination plan
var destinations []*worker_pb.ECDestination
var allConflicts []string
for _, plan := range multiPlan.Plans {
allConflicts = append(allConflicts, plan.Conflicts...)
// Create disk-aware destination
destinations = append(destinations, &worker_pb.ECDestination{
Node: plan.TargetNode,
DiskId: plan.TargetDisk,
Rack: plan.TargetRack,
DataCenter: plan.TargetDC,
PlacementScore: plan.PlacementScore,
})
}
glog.V(1).Infof("EC destination planning for volume %d: got %d destinations (%d+%d shards) across %d racks and %d DCs",
task.VolumeID, len(destinations), dataShards, parityShards, multiPlan.SuccessfulRack, multiPlan.SuccessfulDCs)
if len(destinations) == 0 {
glog.Warningf("No destinations available for EC task volume %d - rejecting task", task.VolumeID)
task.TypedParams = nil
return
}
// Collect existing EC shard locations for cleanup
existingShardLocations := s.collectExistingEcShardLocations(task.VolumeID)
// Create EC task parameters
ecParams := &worker_pb.ErasureCodingTaskParams{
Destinations: destinations, // Disk-aware destinations
DataShards: dataShards,
ParityShards: parityShards,
WorkingDir: "/tmp/seaweedfs_ec_work",
MasterClient: "localhost:9333",
CleanupSource: true,
ExistingShardLocations: existingShardLocations, // Pass existing shards for cleanup
}
// Add placement conflicts if any
if len(allConflicts) > 0 {
// Remove duplicates
conflictMap := make(map[string]bool)
var uniqueConflicts []string
for _, conflict := range allConflicts {
if !conflictMap[conflict] {
conflictMap[conflict] = true
uniqueConflicts = append(uniqueConflicts, conflict)
}
}
ecParams.PlacementConflicts = uniqueConflicts
}
// Wrap in TaskParams
task.TypedParams = &worker_pb.TaskParams{
VolumeId: task.VolumeID,
Server: task.Server,
Collection: task.Collection,
TaskParams: &worker_pb.TaskParams_ErasureCodingParams{
ErasureCodingParams: ecParams,
},
}
glog.V(1).Infof("Created EC task params with %d destinations for volume %d",
len(destinations), task.VolumeID)
}
// createBalanceTaskParams creates typed parameters for balance/move tasks
func (s *MaintenanceIntegration) createBalanceTaskParams(task *TaskDetectionResult, destinationPlan *topology.DestinationPlan) {
// balanceConfig could be used for future config options like ImbalanceThreshold, MinServerCount
// Create balance task parameters
balanceParams := &worker_pb.BalanceTaskParams{
DestNode: destinationPlan.TargetNode,
EstimatedSize: destinationPlan.ExpectedSize,
DestRack: destinationPlan.TargetRack,
DestDc: destinationPlan.TargetDC,
PlacementScore: destinationPlan.PlacementScore,
ForceMove: false, // Default to false
TimeoutSeconds: 300, // Default 5 minutes
}
// Add placement conflicts if any
if len(destinationPlan.Conflicts) > 0 {
balanceParams.PlacementConflicts = destinationPlan.Conflicts
}
// Note: balanceConfig would have ImbalanceThreshold, MinServerCount if needed for future enhancements
// Wrap in TaskParams
task.TypedParams = &worker_pb.TaskParams{
VolumeId: task.VolumeID,
Server: task.Server,
Collection: task.Collection,
TaskParams: &worker_pb.TaskParams_BalanceParams{
BalanceParams: balanceParams,
},
}
glog.V(1).Infof("Created balance task params for volume %d: %s -> %s (score: %.2f)",
task.VolumeID, task.Server, destinationPlan.TargetNode, destinationPlan.PlacementScore)
}
// createReplicationTaskParams creates typed parameters for replication tasks
func (s *MaintenanceIntegration) createReplicationTaskParams(task *TaskDetectionResult, destinationPlan *topology.DestinationPlan) {
// replicationConfig could be used for future config options like TargetReplicaCount
// Create replication task parameters
replicationParams := &worker_pb.ReplicationTaskParams{
DestNode: destinationPlan.TargetNode,
DestRack: destinationPlan.TargetRack,
DestDc: destinationPlan.TargetDC,
PlacementScore: destinationPlan.PlacementScore,
}
// Add placement conflicts if any
if len(destinationPlan.Conflicts) > 0 {
replicationParams.PlacementConflicts = destinationPlan.Conflicts
}
// Note: replicationConfig would have TargetReplicaCount if needed for future enhancements
// Wrap in TaskParams
task.TypedParams = &worker_pb.TaskParams{
VolumeId: task.VolumeID,
Server: task.Server,
Collection: task.Collection,
TaskParams: &worker_pb.TaskParams_ReplicationParams{
ReplicationParams: replicationParams,
},
}
glog.V(1).Infof("Created replication task params for volume %d: %s -> %s",
task.VolumeID, task.Server, destinationPlan.TargetNode)
}
// getOptimalECShardCount returns the optimal number of EC shards based on available disks
// Uses a simplified approach to avoid blocking during UI access
func (s *MaintenanceIntegration) getOptimalECShardCount() int {
// Try to get available disks quickly, but don't block if topology is busy
availableDisks := s.getAvailableDisksQuickly()
// EC configurations in order of preference: (data+parity=total)
// Use smaller configurations for smaller clusters
if availableDisks >= 14 {
glog.V(1).Infof("Using default EC configuration: 10+4=14 shards for %d available disks", availableDisks)
return 14 // Default: 10+4
} else if availableDisks >= 6 {
glog.V(1).Infof("Using small cluster EC configuration: 4+2=6 shards for %d available disks", availableDisks)
return 6 // Small cluster: 4+2
} else if availableDisks >= 4 {
glog.V(1).Infof("Using minimal EC configuration: 3+1=4 shards for %d available disks", availableDisks)
return 4 // Minimal: 3+1
} else {
glog.V(1).Infof("Using very small cluster EC configuration: 2+1=3 shards for %d available disks", availableDisks)
return 3 // Very small: 2+1
}
}
// getAvailableDisksQuickly returns available disk count with a fast path to avoid UI blocking
func (s *MaintenanceIntegration) getAvailableDisksQuickly() int {
// Use ActiveTopology's optimized disk counting if available
// Use empty task type and node filter for general availability check
allDisks := s.activeTopology.GetAvailableDisks(topology.TaskTypeErasureCoding, "")
if len(allDisks) > 0 {
return len(allDisks)
}
// Fallback: try to count from topology but don't hold locks for too long
topologyInfo := s.activeTopology.GetTopologyInfo()
return s.countAvailableDisks(topologyInfo)
}
// countAvailableDisks counts the total number of available disks in the topology
func (s *MaintenanceIntegration) countAvailableDisks(topologyInfo *master_pb.TopologyInfo) int {
if topologyInfo == nil {
return 0
}
diskCount := 0
for _, dc := range topologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, node := range rack.DataNodeInfos {
diskCount += len(node.DiskInfos)
}
}
}
return diskCount
}
// getECShardCounts determines data and parity shard counts for a given total
func (s *MaintenanceIntegration) getECShardCounts(totalShards int) (int32, int32) {
// Map total shards to (data, parity) configurations
switch totalShards {
case 14:
return 10, 4 // Default: 10+4
case 9:
return 6, 3 // Medium: 6+3
case 6:
return 4, 2 // Small: 4+2
case 4:
return 3, 1 // Minimal: 3+1
case 3:
return 2, 1 // Very small: 2+1
default:
// For any other total, try to maintain roughly 3:1 or 4:1 ratio
if totalShards >= 4 {
parityShards := totalShards / 4
if parityShards < 1 {
parityShards = 1
}
dataShards := totalShards - parityShards
return int32(dataShards), int32(parityShards)
}
// Fallback for very small clusters
return int32(totalShards - 1), 1
}
}
// collectExistingEcShardLocations queries the master for existing EC shard locations during planning
func (s *MaintenanceIntegration) collectExistingEcShardLocations(volumeId uint32) []*worker_pb.ExistingECShardLocation {
var existingShardLocations []*worker_pb.ExistingECShardLocation
// Use insecure connection for simplicity - in production this might be configurable
grpcDialOption := grpc.WithTransportCredentials(insecure.NewCredentials())
err := operation.WithMasterServerClient(false, pb.ServerAddress("localhost:9333"), grpcDialOption,
func(masterClient master_pb.SeaweedClient) error {
req := &master_pb.LookupEcVolumeRequest{
VolumeId: volumeId,
}
resp, err := masterClient.LookupEcVolume(context.Background(), req)
if err != nil {
// If volume doesn't exist as EC volume, that's fine - just no existing shards
glog.V(1).Infof("LookupEcVolume for volume %d returned: %v (this is normal if no existing EC shards)", volumeId, err)
return nil
}
// Group shard locations by server
serverShardMap := make(map[string][]uint32)
for _, shardIdLocation := range resp.ShardIdLocations {
shardId := uint32(shardIdLocation.ShardId)
for _, location := range shardIdLocation.Locations {
serverAddr := pb.NewServerAddressFromLocation(location)
serverShardMap[string(serverAddr)] = append(serverShardMap[string(serverAddr)], shardId)
}
}
// Convert to protobuf format
for serverAddr, shardIds := range serverShardMap {
existingShardLocations = append(existingShardLocations, &worker_pb.ExistingECShardLocation{
Node: serverAddr,
ShardIds: shardIds,
})
}
return nil
})
if err != nil {
glog.Errorf("Failed to lookup existing EC shards from master for volume %d: %v", volumeId, err)
// Return empty list - cleanup will be skipped but task can continue
return []*worker_pb.ExistingECShardLocation{}
}
if len(existingShardLocations) > 0 {
glog.V(1).Infof("Found existing EC shards for volume %d on %d servers during planning", volumeId, len(existingShardLocations))
}
return existingShardLocations
}

View file

@@ -7,7 +7,6 @@ import (
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
-"github.com/seaweedfs/seaweedfs/weed/pb/worker_pb"
)
// NewMaintenanceQueue creates a new maintenance queue
@@ -27,6 +26,102 @@ func (mq *MaintenanceQueue) SetIntegration(integration *MaintenanceIntegration)
glog.V(1).Infof("Maintenance queue configured with integration")
}
// SetPersistence sets the task persistence interface
func (mq *MaintenanceQueue) SetPersistence(persistence TaskPersistence) {
mq.persistence = persistence
glog.V(1).Infof("Maintenance queue configured with task persistence")
}
// LoadTasksFromPersistence loads tasks from persistent storage on startup
func (mq *MaintenanceQueue) LoadTasksFromPersistence() error {
if mq.persistence == nil {
glog.V(1).Infof("No task persistence configured, skipping task loading")
return nil
}
mq.mutex.Lock()
defer mq.mutex.Unlock()
glog.Infof("Loading tasks from persistence...")
tasks, err := mq.persistence.LoadAllTaskStates()
if err != nil {
return fmt.Errorf("failed to load task states: %w", err)
}
glog.Infof("DEBUG LoadTasksFromPersistence: Found %d tasks in persistence", len(tasks))
// Reset task maps
mq.tasks = make(map[string]*MaintenanceTask)
mq.pendingTasks = make([]*MaintenanceTask, 0)
// Load tasks by status
for _, task := range tasks {
glog.Infof("DEBUG LoadTasksFromPersistence: Loading task %s (type: %s, status: %s, scheduled: %v)", task.ID, task.Type, task.Status, task.ScheduledAt)
mq.tasks[task.ID] = task
switch task.Status {
case TaskStatusPending:
glog.Infof("DEBUG LoadTasksFromPersistence: Adding task %s to pending queue", task.ID)
mq.pendingTasks = append(mq.pendingTasks, task)
case TaskStatusAssigned, TaskStatusInProgress:
// For assigned/in-progress tasks, we need to check if the worker is still available
// If not, we should fail them and make them eligible for retry
if task.WorkerID != "" {
if _, exists := mq.workers[task.WorkerID]; !exists {
glog.Warningf("Task %s was assigned to unavailable worker %s, marking as failed", task.ID, task.WorkerID)
task.Status = TaskStatusFailed
task.Error = "Worker unavailable after restart"
completedTime := time.Now()
task.CompletedAt = &completedTime
// Check if it should be retried
if task.RetryCount < task.MaxRetries {
task.RetryCount++
task.Status = TaskStatusPending
task.WorkerID = ""
task.StartedAt = nil
task.CompletedAt = nil
task.Error = ""
task.ScheduledAt = time.Now().Add(1 * time.Minute) // Retry after restart delay
glog.Infof("DEBUG LoadTasksFromPersistence: Retrying task %s, adding to pending queue", task.ID)
mq.pendingTasks = append(mq.pendingTasks, task)
}
}
}
}
}
// Sort pending tasks by priority and schedule time
sort.Slice(mq.pendingTasks, func(i, j int) bool {
if mq.pendingTasks[i].Priority != mq.pendingTasks[j].Priority {
return mq.pendingTasks[i].Priority > mq.pendingTasks[j].Priority
}
return mq.pendingTasks[i].ScheduledAt.Before(mq.pendingTasks[j].ScheduledAt)
})
glog.Infof("Loaded %d tasks from persistence (%d pending)", len(tasks), len(mq.pendingTasks))
return nil
}
// saveTaskState saves a task to persistent storage
func (mq *MaintenanceQueue) saveTaskState(task *MaintenanceTask) {
if mq.persistence != nil {
if err := mq.persistence.SaveTaskState(task); err != nil {
glog.Errorf("Failed to save task state for %s: %v", task.ID, err)
}
}
}
// cleanupCompletedTasks removes old completed tasks beyond the retention limit
func (mq *MaintenanceQueue) cleanupCompletedTasks() {
if mq.persistence != nil {
if err := mq.persistence.CleanupCompletedTasks(); err != nil {
glog.Errorf("Failed to cleanup completed tasks: %v", err)
}
}
}
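The persistence hooks above only take effect once a TaskPersistence implementation is attached to the queue and any previously saved tasks are reloaded. A minimal wiring sketch, assuming the queue and the persistence implementation are constructed elsewhere in the admin server (the helper name is illustrative, only SetPersistence and LoadTasksFromPersistence come from this change):

// restoreQueueState is a hypothetical helper showing the intended call order:
// attach persistence first, then reload saved tasks so pending work survives a restart.
func restoreQueueState(mq *MaintenanceQueue, p TaskPersistence) error {
	mq.SetPersistence(p)
	if err := mq.LoadTasksFromPersistence(); err != nil {
		return fmt.Errorf("failed to restore maintenance tasks: %w", err)
	}
	return nil
}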
// AddTask adds a new maintenance task to the queue with deduplication
func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
mq.mutex.Lock()
@@ -44,6 +139,18 @@ func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
task.CreatedAt = time.Now()
task.MaxRetries = 3 // Default retry count
// Initialize assignment history and set creation context
task.AssignmentHistory = make([]*TaskAssignmentRecord, 0)
if task.CreatedBy == "" {
task.CreatedBy = "maintenance-system"
}
if task.CreationContext == "" {
task.CreationContext = "Automatic task creation based on system monitoring"
}
if task.Tags == nil {
task.Tags = make(map[string]string)
}
mq.tasks[task.ID] = task
mq.pendingTasks = append(mq.pendingTasks, task)
@@ -55,6 +162,9 @@ func (mq *MaintenanceQueue) AddTask(task *MaintenanceTask) {
return mq.pendingTasks[i].ScheduledAt.Before(mq.pendingTasks[j].ScheduledAt)
})
// Save task state to persistence
mq.saveTaskState(task)
scheduleInfo := ""
if !task.ScheduledAt.IsZero() && time.Until(task.ScheduledAt) > time.Minute {
scheduleInfo = fmt.Sprintf(", scheduled for %v", task.ScheduledAt.Format("15:04:05"))
@@ -143,7 +253,11 @@ func (mq *MaintenanceQueue) GetNextTask(workerID string, capabilities []Maintena
// Check if this task type needs a cooldown period
if !mq.canScheduleTaskNow(task) {
-glog.V(3).Infof("Task %s (%s) skipped for worker %s: scheduling constraints not met", task.ID, task.Type, workerID)
+// Add detailed diagnostic information
+runningCount := mq.GetRunningTaskCount(task.Type)
+maxConcurrent := mq.getMaxConcurrentForTaskType(task.Type)
+glog.V(2).Infof("Task %s (%s) skipped for worker %s: scheduling constraints not met (running: %d, max: %d)",
+task.ID, task.Type, workerID, runningCount, maxConcurrent)
continue
}
@@ -172,6 +286,26 @@ func (mq *MaintenanceQueue) GetNextTask(workerID string, capabilities []Maintena
return nil
}
// Record assignment history
workerAddress := ""
if worker, exists := mq.workers[workerID]; exists {
workerAddress = worker.Address
}
// Create assignment record
assignmentRecord := &TaskAssignmentRecord{
WorkerID: workerID,
WorkerAddress: workerAddress,
AssignedAt: now,
Reason: "Task assigned to available worker",
}
// Initialize assignment history if nil
if selectedTask.AssignmentHistory == nil {
selectedTask.AssignmentHistory = make([]*TaskAssignmentRecord, 0)
}
selectedTask.AssignmentHistory = append(selectedTask.AssignmentHistory, assignmentRecord)
// Assign the task
selectedTask.Status = TaskStatusAssigned
selectedTask.WorkerID = workerID
@@ -188,6 +322,9 @@ func (mq *MaintenanceQueue) GetNextTask(workerID string, capabilities []Maintena
// Track pending operation
mq.trackPendingOperation(selectedTask)
+// Save task state after assignment
+mq.saveTaskState(selectedTask)
glog.Infof("Task assigned: %s (%s) → worker %s (volume %d, server %s)",
selectedTask.ID, selectedTask.Type, workerID, selectedTask.VolumeID, selectedTask.Server)
@@ -220,6 +357,17 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
// Check if task should be retried
if task.RetryCount < task.MaxRetries {
// Record unassignment due to failure/retry
if task.WorkerID != "" && len(task.AssignmentHistory) > 0 {
lastAssignment := task.AssignmentHistory[len(task.AssignmentHistory)-1]
if lastAssignment.UnassignedAt == nil {
unassignedTime := completedTime
lastAssignment.UnassignedAt = &unassignedTime
lastAssignment.Reason = fmt.Sprintf("Task failed, scheduling retry (attempt %d/%d): %s",
task.RetryCount+1, task.MaxRetries, error)
}
}
task.RetryCount++
task.Status = TaskStatusPending
task.WorkerID = ""
@@ -229,15 +377,31 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
task.ScheduledAt = time.Now().Add(15 * time.Minute) // Retry delay
mq.pendingTasks = append(mq.pendingTasks, task)
+// Save task state after retry setup
+mq.saveTaskState(task)
glog.Warningf("Task failed, scheduling retry: %s (%s) attempt %d/%d, worker %s, duration %v, error: %s",
taskID, task.Type, task.RetryCount, task.MaxRetries, task.WorkerID, duration, error)
} else {
// Record unassignment due to permanent failure
if task.WorkerID != "" && len(task.AssignmentHistory) > 0 {
lastAssignment := task.AssignmentHistory[len(task.AssignmentHistory)-1]
if lastAssignment.UnassignedAt == nil {
unassignedTime := completedTime
lastAssignment.UnassignedAt = &unassignedTime
lastAssignment.Reason = fmt.Sprintf("Task failed permanently after %d retries: %s", task.MaxRetries, error)
}
}
// Save task state after permanent failure
mq.saveTaskState(task)
glog.Errorf("Task failed permanently: %s (%s) worker %s, duration %v, after %d retries: %s",
taskID, task.Type, task.WorkerID, duration, task.MaxRetries, error)
}
} else {
task.Status = TaskStatusCompleted
task.Progress = 100
+// Save task state after successful completion
+mq.saveTaskState(task)
glog.Infof("Task completed: %s (%s) worker %s, duration %v, volume %d",
taskID, task.Type, task.WorkerID, duration, task.VolumeID)
}
@@ -257,6 +421,14 @@ func (mq *MaintenanceQueue) CompleteTask(taskID string, error string) {
if task.Status != TaskStatusPending {
mq.removePendingOperation(taskID)
}
// Periodically cleanup old completed tasks (every 10th completion)
if task.Status == TaskStatusCompleted {
// Simple counter-based trigger for cleanup
if len(mq.tasks)%10 == 0 {
go mq.cleanupCompletedTasks()
}
}
}
// UpdateTaskProgress updates the progress of a running task
@@ -283,6 +455,11 @@ func (mq *MaintenanceQueue) UpdateTaskProgress(taskID string, progress float64)
glog.V(1).Infof("Task progress: %s (%s) worker %s, %.1f%% complete",
taskID, task.Type, task.WorkerID, progress)
}
// Save task state after progress update
if progress == 0 || progress >= 100 || progress-oldProgress >= 10 {
mq.saveTaskState(task)
}
} else {
glog.V(2).Infof("Progress update for unknown task: %s (%.1f%%)", taskID, progress)
}
@@ -489,9 +666,19 @@ func (mq *MaintenanceQueue) RemoveStaleWorkers(timeout time.Duration) int {
for id, worker := range mq.workers {
if worker.LastHeartbeat.Before(cutoff) {
-// Mark any assigned tasks as failed
+// Mark any assigned tasks as failed and record unassignment
for _, task := range mq.tasks {
if task.WorkerID == id && (task.Status == TaskStatusAssigned || task.Status == TaskStatusInProgress) {
// Record unassignment due to worker becoming unavailable
if len(task.AssignmentHistory) > 0 {
lastAssignment := task.AssignmentHistory[len(task.AssignmentHistory)-1]
if lastAssignment.UnassignedAt == nil {
unassignedTime := time.Now()
lastAssignment.UnassignedAt = &unassignedTime
lastAssignment.Reason = "Worker became unavailable (stale heartbeat)"
}
}
task.Status = TaskStatusFailed
task.Error = "Worker became unavailable"
completedTime := time.Now()
@@ -600,7 +787,10 @@ func (mq *MaintenanceQueue) canExecuteTaskType(taskType MaintenanceTaskType) boo
runningCount := mq.GetRunningTaskCount(taskType)
maxConcurrent := mq.getMaxConcurrentForTaskType(taskType)
-return runningCount < maxConcurrent
+canExecute := runningCount < maxConcurrent
+glog.V(3).Infof("canExecuteTaskType for %s: running=%d, max=%d, canExecute=%v", taskType, runningCount, maxConcurrent, canExecute)
+return canExecute
}
// getMaxConcurrentForTaskType returns the maximum concurrent tasks allowed for a task type
@@ -684,40 +874,28 @@ func (mq *MaintenanceQueue) trackPendingOperation(task *MaintenanceTask) {
opType = OpTypeVolumeMove
}
-// Determine destination node and estimated size from typed parameters
+// Determine destination node and estimated size from unified targets
destNode := ""
estimatedSize := uint64(1024 * 1024 * 1024) // Default 1GB estimate
-switch params := task.TypedParams.TaskParams.(type) {
-case *worker_pb.TaskParams_ErasureCodingParams:
-if params.ErasureCodingParams != nil {
-if len(params.ErasureCodingParams.Destinations) > 0 {
-destNode = params.ErasureCodingParams.Destinations[0].Node
-}
-if params.ErasureCodingParams.EstimatedShardSize > 0 {
-estimatedSize = params.ErasureCodingParams.EstimatedShardSize
-}
-}
-case *worker_pb.TaskParams_BalanceParams:
-if params.BalanceParams != nil {
-destNode = params.BalanceParams.DestNode
-if params.BalanceParams.EstimatedSize > 0 {
-estimatedSize = params.BalanceParams.EstimatedSize
-}
-}
-case *worker_pb.TaskParams_ReplicationParams:
-if params.ReplicationParams != nil {
-destNode = params.ReplicationParams.DestNode
-if params.ReplicationParams.EstimatedSize > 0 {
-estimatedSize = params.ReplicationParams.EstimatedSize
-}
+// Use unified targets array - the only source of truth
+if len(task.TypedParams.Targets) > 0 {
+destNode = task.TypedParams.Targets[0].Node
+if task.TypedParams.Targets[0].EstimatedSize > 0 {
+estimatedSize = task.TypedParams.Targets[0].EstimatedSize
}
}
+// Determine source node from unified sources
+sourceNode := ""
+if len(task.TypedParams.Sources) > 0 {
+sourceNode = task.TypedParams.Sources[0].Node
+}
operation := &PendingOperation{
VolumeID: task.VolumeID,
OperationType: opType,
-SourceNode: task.Server,
+SourceNode: sourceNode,
DestNode: destNode,
TaskID: task.ID,
StartTime: time.Now(),

View file

@@ -73,20 +73,10 @@ func (ms *MaintenanceScanner) ScanForMaintenanceTasks() ([]*TaskDetectionResult,
// getVolumeHealthMetrics collects health information for all volumes
func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics, error) {
var metrics []*VolumeHealthMetrics
-var volumeSizeLimitMB uint64
glog.V(1).Infof("Collecting volume health metrics from master")
err := ms.adminClient.WithMasterClient(func(client master_pb.SeaweedClient) error {
-// First, get volume size limit from master configuration
-configResp, err := client.GetMasterConfiguration(context.Background(), &master_pb.GetMasterConfigurationRequest{})
-if err != nil {
-glog.Warningf("Failed to get volume size limit from master: %v", err)
-volumeSizeLimitMB = 30000 // Default to 30GB if we can't get from master
-} else {
-volumeSizeLimitMB = uint64(configResp.VolumeSizeLimitMB)
-}
-// Now get volume list
resp, err := client.VolumeList(context.Background(), &master_pb.VolumeListRequest{})
if err != nil {
return err
@@ -97,7 +87,7 @@ func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics,
return nil
}
-volumeSizeLimitBytes := volumeSizeLimitMB * 1024 * 1024 // Convert MB to bytes
+volumeSizeLimitBytes := uint64(resp.VolumeSizeLimitMb) * 1024 * 1024 // Convert MB to bytes
// Track all nodes discovered in topology
var allNodesInTopology []string
@@ -127,6 +117,8 @@ func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics,
Server: node.Id,
DiskType: diskType, // Track which disk this volume is on
DiskId: volInfo.DiskId, // Use disk ID from volume info
+DataCenter: dc.Id, // Data center from current loop
+Rack: rack.Id, // Rack from current loop
Collection: volInfo.Collection,
Size: volInfo.Size,
DeletedBytes: volInfo.DeletedByteCount,
@@ -166,7 +158,6 @@ func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics,
glog.Infof(" - Total volume servers in topology: %d (%v)", len(allNodesInTopology), allNodesInTopology)
glog.Infof(" - Volume servers with volumes: %d (%v)", len(nodesWithVolumes), nodesWithVolumes)
glog.Infof(" - Volume servers without volumes: %d (%v)", len(nodesWithoutVolumes), nodesWithoutVolumes)
-glog.Infof("Note: Maintenance system will track empty servers separately from volume metrics.")
// Store topology info for volume shard tracker
ms.lastTopologyInfo = resp.TopologyInfo
@@ -187,11 +178,6 @@ func (ms *MaintenanceScanner) getVolumeHealthMetrics() ([]*VolumeHealthMetrics,
return metrics, nil
}
-// getTopologyInfo returns the last collected topology information
-func (ms *MaintenanceScanner) getTopologyInfo() *master_pb.TopologyInfo {
-return ms.lastTopologyInfo
-}
// enrichVolumeMetrics adds additional information like replica counts
func (ms *MaintenanceScanner) enrichVolumeMetrics(metrics []*VolumeHealthMetrics) {
// Group volumes by ID to count replicas
@@ -223,6 +209,8 @@ func (ms *MaintenanceScanner) convertToTaskMetrics(metrics []*VolumeHealthMetric
Server: metric.Server,
DiskType: metric.DiskType,
DiskId: metric.DiskId,
+DataCenter: metric.DataCenter,
+Rack: metric.Rack,
Collection: metric.Collection,
Size: metric.Size,
DeletedBytes: metric.DeletedBytes,

View file

@@ -108,6 +108,57 @@ type MaintenanceTask struct {
Progress float64 `json:"progress"` // 0-100
RetryCount int `json:"retry_count"`
MaxRetries int `json:"max_retries"`
// Enhanced fields for detailed task tracking
CreatedBy string `json:"created_by,omitempty"` // Who/what created this task
CreationContext string `json:"creation_context,omitempty"` // Additional context about creation
AssignmentHistory []*TaskAssignmentRecord `json:"assignment_history,omitempty"` // History of worker assignments
DetailedReason string `json:"detailed_reason,omitempty"` // More detailed explanation than Reason
Tags map[string]string `json:"tags,omitempty"` // Additional metadata tags
}
// TaskAssignmentRecord tracks when a task was assigned to a worker
type TaskAssignmentRecord struct {
WorkerID string `json:"worker_id"`
WorkerAddress string `json:"worker_address"`
AssignedAt time.Time `json:"assigned_at"`
UnassignedAt *time.Time `json:"unassigned_at,omitempty"`
Reason string `json:"reason"` // Why was it assigned/unassigned
}
// TaskExecutionLog represents a log entry from task execution
type TaskExecutionLog struct {
Timestamp time.Time `json:"timestamp"`
Level string `json:"level"` // "info", "warn", "error", "debug"
Message string `json:"message"`
Source string `json:"source"` // Which component logged this
TaskID string `json:"task_id"`
WorkerID string `json:"worker_id"`
// Optional structured fields carried from worker logs
Fields map[string]string `json:"fields,omitempty"`
// Optional progress/status carried from worker logs
Progress *float64 `json:"progress,omitempty"`
Status string `json:"status,omitempty"`
}
// TaskDetailData represents comprehensive information about a task for the detail view
type TaskDetailData struct {
Task *MaintenanceTask `json:"task"`
AssignmentHistory []*TaskAssignmentRecord `json:"assignment_history"`
ExecutionLogs []*TaskExecutionLog `json:"execution_logs"`
RelatedTasks []*MaintenanceTask `json:"related_tasks,omitempty"` // Other tasks on same volume/server
WorkerInfo *MaintenanceWorker `json:"worker_info,omitempty"` // Current or last assigned worker
CreationMetrics *TaskCreationMetrics `json:"creation_metrics,omitempty"` // Metrics that led to task creation
LastUpdated time.Time `json:"last_updated"`
}
// TaskCreationMetrics holds metrics that led to the task being created
type TaskCreationMetrics struct {
TriggerMetric string `json:"trigger_metric"` // What metric triggered this task
MetricValue float64 `json:"metric_value"` // Value of the trigger metric
Threshold float64 `json:"threshold"` // Threshold that was exceeded
VolumeMetrics *VolumeHealthMetrics `json:"volume_metrics,omitempty"`
AdditionalData map[string]interface{} `json:"additional_data,omitempty"`
} }
// MaintenanceConfig holds configuration for the maintenance system // MaintenanceConfig holds configuration for the maintenance system
@ -122,6 +173,15 @@ type MaintenancePolicy = worker_pb.MaintenancePolicy
// DEPRECATED: Use worker_pb.TaskPolicy instead // DEPRECATED: Use worker_pb.TaskPolicy instead
type TaskPolicy = worker_pb.TaskPolicy type TaskPolicy = worker_pb.TaskPolicy
// TaskPersistence interface for task state persistence
type TaskPersistence interface {
SaveTaskState(task *MaintenanceTask) error
LoadTaskState(taskID string) (*MaintenanceTask, error)
LoadAllTaskStates() ([]*MaintenanceTask, error)
DeleteTaskState(taskID string) error
CleanupCompletedTasks() error
}
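For reference, the interface above can be satisfied by a very small in-memory store. The sketch below is illustrative only (for example, for tests); it assumes it lives in the maintenance package so that MaintenanceTask, TaskStatusCompleted, sync, and fmt are in scope, while the real implementation is expected to persist task state durably.

// memoryTaskPersistence is a hypothetical in-memory implementation of TaskPersistence.
type memoryTaskPersistence struct {
	mu    sync.RWMutex
	tasks map[string]*MaintenanceTask
}

func newMemoryTaskPersistence() *memoryTaskPersistence {
	return &memoryTaskPersistence{tasks: make(map[string]*MaintenanceTask)}
}

func (m *memoryTaskPersistence) SaveTaskState(task *MaintenanceTask) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	m.tasks[task.ID] = task
	return nil
}

func (m *memoryTaskPersistence) LoadTaskState(taskID string) (*MaintenanceTask, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	task, ok := m.tasks[taskID]
	if !ok {
		return nil, fmt.Errorf("task %s not found", taskID)
	}
	return task, nil
}

func (m *memoryTaskPersistence) LoadAllTaskStates() ([]*MaintenanceTask, error) {
	m.mu.RLock()
	defer m.mu.RUnlock()
	all := make([]*MaintenanceTask, 0, len(m.tasks))
	for _, task := range m.tasks {
		all = append(all, task)
	}
	return all, nil
}

func (m *memoryTaskPersistence) DeleteTaskState(taskID string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.tasks, taskID)
	return nil
}

func (m *memoryTaskPersistence) CleanupCompletedTasks() error {
	m.mu.Lock()
	defer m.mu.Unlock()
	for id, task := range m.tasks {
		if task.Status == TaskStatusCompleted {
			delete(m.tasks, id)
		}
	}
	return nil
}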
// Default configuration values
func DefaultMaintenanceConfig() *MaintenanceConfig {
return DefaultMaintenanceConfigProto()
@@ -273,6 +333,7 @@ type MaintenanceQueue struct {
mutex sync.RWMutex
policy *MaintenancePolicy
integration *MaintenanceIntegration
+persistence TaskPersistence // Interface for task persistence
}
// MaintenanceScanner analyzes the cluster and generates maintenance tasks
@@ -301,8 +362,10 @@ type TaskDetectionResult struct {
type VolumeHealthMetrics struct {
VolumeID uint32 `json:"volume_id"`
Server string `json:"server"`
DiskType string `json:"disk_type"` // Disk type (e.g., "hdd", "ssd") or disk path (e.g., "/data1")
DiskId uint32 `json:"disk_id"` // ID of the disk in Store.Locations array
+DataCenter string `json:"data_center"` // Data center of the server
+Rack string `json:"rack"` // Rack of the server
Collection string `json:"collection"`
Size uint64 `json:"size"`
DeletedBytes uint64 `json:"deleted_bytes"`

View file

@ -1,6 +1,7 @@
package maintenance
import (
+	"context"
	"fmt"
	"os"
	"sync"
@@ -131,13 +132,13 @@ func NewMaintenanceWorkerService(workerID, address, adminServer string) *Mainten
		currentTasks:  make(map[string]*MaintenanceTask),
		stopChan:      make(chan struct{}),
		taskExecutors: make(map[MaintenanceTaskType]TaskExecutor),
-		taskRegistry:  tasks.GetGlobalRegistry(), // Use global registry with auto-registered tasks
+		taskRegistry:  tasks.GetGlobalTaskRegistry(), // Use global registry with auto-registered tasks
	}
	// Initialize task executor registry
	worker.initializeTaskExecutors()
-	glog.V(1).Infof("Created maintenance worker with %d registered task types", len(worker.taskRegistry.GetSupportedTypes()))
+	glog.V(1).Infof("Created maintenance worker with %d registered task types", len(worker.taskRegistry.GetAll()))
	return worker
}
@@ -154,16 +155,8 @@ func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) e
	// Convert MaintenanceTask to types.TaskType
	taskType := types.TaskType(string(task.Type))
-	// Create task parameters
-	taskParams := types.TaskParams{
-		VolumeID:    task.VolumeID,
-		Server:      task.Server,
-		Collection:  task.Collection,
-		TypedParams: task.TypedParams,
-	}
	// Create task instance using the registry
-	taskInstance, err := mws.taskRegistry.CreateTask(taskType, taskParams)
+	taskInstance, err := mws.taskRegistry.Get(taskType).Create(task.TypedParams)
	if err != nil {
		return fmt.Errorf("failed to create task instance: %w", err)
	}
@@ -172,7 +165,7 @@ func (mws *MaintenanceWorkerService) executeGenericTask(task *MaintenanceTask) e
	mws.updateTaskProgress(task.ID, 5)
	// Execute the task
-	err = taskInstance.Execute(taskParams)
+	err = taskInstance.Execute(context.Background(), task.TypedParams)
	if err != nil {
		return fmt.Errorf("task execution failed: %w", err)
	}
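For readers following the API change above, a minimal sketch of the new execution path in isolation. It is assumed to sit in the same maintenance package; the helper name executeViaRegistry is illustrative, and only the calls visible in this diff (Get(...).Create(...), the two-argument Execute) are used.

// executeViaRegistry (hypothetical helper) mirrors the new flow: resolve the
// task definition from the registry, build an instance from the typed
// protobuf parameters, then run it with a context.
func executeViaRegistry(mws *MaintenanceWorkerService, task *MaintenanceTask) error {
	taskType := types.TaskType(string(task.Type))
	taskInstance, err := mws.taskRegistry.Get(taskType).Create(task.TypedParams)
	if err != nil {
		return fmt.Errorf("failed to create task instance: %w", err)
	}
	return taskInstance.Execute(context.Background(), task.TypedParams)
}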


@ -1,98 +1,5 @@
package topology
import (
"fmt"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// TaskType represents different types of maintenance operations
type TaskType string
// TaskStatus represents the current status of a task
type TaskStatus string
// Common task type constants
const (
TaskTypeVacuum TaskType = "vacuum"
TaskTypeBalance TaskType = "balance"
TaskTypeErasureCoding TaskType = "erasure_coding"
TaskTypeReplication TaskType = "replication"
)
// Common task status constants
const (
TaskStatusPending TaskStatus = "pending"
TaskStatusInProgress TaskStatus = "in_progress"
TaskStatusCompleted TaskStatus = "completed"
)
// taskState represents the current state of tasks affecting the topology (internal)
type taskState struct {
VolumeID uint32 `json:"volume_id"`
TaskType TaskType `json:"task_type"`
SourceServer string `json:"source_server"`
SourceDisk uint32 `json:"source_disk"`
TargetServer string `json:"target_server,omitempty"`
TargetDisk uint32 `json:"target_disk,omitempty"`
Status TaskStatus `json:"status"`
StartedAt time.Time `json:"started_at"`
CompletedAt time.Time `json:"completed_at,omitempty"`
}
// DiskInfo represents a disk with its current state and ongoing tasks (public for external access)
type DiskInfo struct {
NodeID string `json:"node_id"`
DiskID uint32 `json:"disk_id"`
DiskType string `json:"disk_type"`
DataCenter string `json:"data_center"`
Rack string `json:"rack"`
DiskInfo *master_pb.DiskInfo `json:"disk_info"`
LoadCount int `json:"load_count"` // Number of active tasks
}
// activeDisk represents internal disk state (private)
type activeDisk struct {
*DiskInfo
pendingTasks []*taskState
assignedTasks []*taskState
recentTasks []*taskState // Completed in last N seconds
}
// activeNode represents a node with its disks (private)
type activeNode struct {
nodeID string
dataCenter string
rack string
nodeInfo *master_pb.DataNodeInfo
disks map[uint32]*activeDisk // DiskID -> activeDisk
}
// ActiveTopology provides a real-time view of cluster state with task awareness
type ActiveTopology struct {
// Core topology from master
topologyInfo *master_pb.TopologyInfo
lastUpdated time.Time
// Structured topology for easy access (private)
nodes map[string]*activeNode // NodeID -> activeNode
disks map[string]*activeDisk // "NodeID:DiskID" -> activeDisk
// Task states affecting the topology (private)
pendingTasks map[string]*taskState
assignedTasks map[string]*taskState
recentTasks map[string]*taskState
// Configuration
recentTaskWindowSeconds int
// Synchronization
mutex sync.RWMutex
}
// NewActiveTopology creates a new ActiveTopology instance
func NewActiveTopology(recentTaskWindowSeconds int) *ActiveTopology {
	if recentTaskWindowSeconds <= 0 {
@@ -102,640 +9,11 @@ func NewActiveTopology(recentTaskWindowSeconds int) *ActiveTopology {
	return &ActiveTopology{
		nodes:                   make(map[string]*activeNode),
		disks:                   make(map[string]*activeDisk),
+		volumeIndex:             make(map[uint32][]string),
+		ecShardIndex:            make(map[uint32][]string),
		pendingTasks:            make(map[string]*taskState),
		assignedTasks:           make(map[string]*taskState),
		recentTasks:             make(map[string]*taskState),
		recentTaskWindowSeconds: recentTaskWindowSeconds,
	}
}
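As a quick orientation for the slimmed-down constructor, here is a usage sketch written as if it sat in this package's tests: createSampleTopology is the existing test helper referenced below, and the TaskSpec fields are the ones the updated tests exercise; the test name itself is invented.

func TestExampleTopologySetup(t *testing.T) {
	at := NewActiveTopology(10) // remember completed tasks for 10 seconds
	require.NoError(t, at.UpdateTopology(createSampleTopology()))

	// Register a pending balance move using the new TaskSpec-based API.
	err := at.AddPendingTask(TaskSpec{
		TaskID:       "balance-example",
		TaskType:     TaskTypeBalance,
		VolumeID:     42,
		VolumeSize:   1024 * 1024 * 1024,
		Sources:      []TaskSourceSpec{{ServerID: "10.0.0.1:8080", DiskID: 0}},
		Destinations: []TaskDestinationSpec{{ServerID: "10.0.0.2:8080", DiskID: 1}},
	})
	require.NoError(t, err)
}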
// UpdateTopology updates the topology information from master
func (at *ActiveTopology) UpdateTopology(topologyInfo *master_pb.TopologyInfo) error {
at.mutex.Lock()
defer at.mutex.Unlock()
at.topologyInfo = topologyInfo
at.lastUpdated = time.Now()
// Rebuild structured topology
at.nodes = make(map[string]*activeNode)
at.disks = make(map[string]*activeDisk)
for _, dc := range topologyInfo.DataCenterInfos {
for _, rack := range dc.RackInfos {
for _, nodeInfo := range rack.DataNodeInfos {
node := &activeNode{
nodeID: nodeInfo.Id,
dataCenter: dc.Id,
rack: rack.Id,
nodeInfo: nodeInfo,
disks: make(map[uint32]*activeDisk),
}
// Add disks for this node
for diskType, diskInfo := range nodeInfo.DiskInfos {
disk := &activeDisk{
DiskInfo: &DiskInfo{
NodeID: nodeInfo.Id,
DiskID: diskInfo.DiskId,
DiskType: diskType,
DataCenter: dc.Id,
Rack: rack.Id,
DiskInfo: diskInfo,
},
}
diskKey := fmt.Sprintf("%s:%d", nodeInfo.Id, diskInfo.DiskId)
node.disks[diskInfo.DiskId] = disk
at.disks[diskKey] = disk
}
at.nodes[nodeInfo.Id] = node
}
}
}
// Reassign task states to updated topology
at.reassignTaskStates()
glog.V(1).Infof("ActiveTopology updated: %d nodes, %d disks", len(at.nodes), len(at.disks))
return nil
}
// AddPendingTask adds a pending task to the topology
func (at *ActiveTopology) AddPendingTask(taskID string, taskType TaskType, volumeID uint32,
sourceServer string, sourceDisk uint32, targetServer string, targetDisk uint32) {
at.mutex.Lock()
defer at.mutex.Unlock()
task := &taskState{
VolumeID: volumeID,
TaskType: taskType,
SourceServer: sourceServer,
SourceDisk: sourceDisk,
TargetServer: targetServer,
TargetDisk: targetDisk,
Status: TaskStatusPending,
StartedAt: time.Now(),
}
at.pendingTasks[taskID] = task
at.assignTaskToDisk(task)
}
// AssignTask moves a task from pending to assigned
func (at *ActiveTopology) AssignTask(taskID string) error {
at.mutex.Lock()
defer at.mutex.Unlock()
task, exists := at.pendingTasks[taskID]
if !exists {
return fmt.Errorf("pending task %s not found", taskID)
}
delete(at.pendingTasks, taskID)
task.Status = TaskStatusInProgress
at.assignedTasks[taskID] = task
at.reassignTaskStates()
return nil
}
// CompleteTask moves a task from assigned to recent
func (at *ActiveTopology) CompleteTask(taskID string) error {
at.mutex.Lock()
defer at.mutex.Unlock()
task, exists := at.assignedTasks[taskID]
if !exists {
return fmt.Errorf("assigned task %s not found", taskID)
}
delete(at.assignedTasks, taskID)
task.Status = TaskStatusCompleted
task.CompletedAt = time.Now()
at.recentTasks[taskID] = task
at.reassignTaskStates()
// Clean up old recent tasks
at.cleanupRecentTasks()
return nil
}
// GetAvailableDisks returns disks that can accept new tasks of the given type
func (at *ActiveTopology) GetAvailableDisks(taskType TaskType, excludeNodeID string) []*DiskInfo {
at.mutex.RLock()
defer at.mutex.RUnlock()
var available []*DiskInfo
for _, disk := range at.disks {
if disk.NodeID == excludeNodeID {
continue // Skip excluded node
}
if at.isDiskAvailable(disk, taskType) {
// Create a copy with current load count
diskCopy := *disk.DiskInfo
diskCopy.LoadCount = len(disk.pendingTasks) + len(disk.assignedTasks)
available = append(available, &diskCopy)
}
}
return available
}
// GetDiskLoad returns the current load on a disk (number of active tasks)
func (at *ActiveTopology) GetDiskLoad(nodeID string, diskID uint32) int {
at.mutex.RLock()
defer at.mutex.RUnlock()
diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
disk, exists := at.disks[diskKey]
if !exists {
return 0
}
return len(disk.pendingTasks) + len(disk.assignedTasks)
}
// HasRecentTaskForVolume checks if a volume had a recent task (to avoid immediate re-detection)
func (at *ActiveTopology) HasRecentTaskForVolume(volumeID uint32, taskType TaskType) bool {
at.mutex.RLock()
defer at.mutex.RUnlock()
for _, task := range at.recentTasks {
if task.VolumeID == volumeID && task.TaskType == taskType {
return true
}
}
return false
}
// GetAllNodes returns information about all nodes (public interface)
func (at *ActiveTopology) GetAllNodes() map[string]*master_pb.DataNodeInfo {
at.mutex.RLock()
defer at.mutex.RUnlock()
result := make(map[string]*master_pb.DataNodeInfo)
for nodeID, node := range at.nodes {
result[nodeID] = node.nodeInfo
}
return result
}
// GetTopologyInfo returns the current topology information (read-only access)
func (at *ActiveTopology) GetTopologyInfo() *master_pb.TopologyInfo {
at.mutex.RLock()
defer at.mutex.RUnlock()
return at.topologyInfo
}
// GetNodeDisks returns all disks for a specific node
func (at *ActiveTopology) GetNodeDisks(nodeID string) []*DiskInfo {
at.mutex.RLock()
defer at.mutex.RUnlock()
node, exists := at.nodes[nodeID]
if !exists {
return nil
}
var disks []*DiskInfo
for _, disk := range node.disks {
diskCopy := *disk.DiskInfo
diskCopy.LoadCount = len(disk.pendingTasks) + len(disk.assignedTasks)
disks = append(disks, &diskCopy)
}
return disks
}
// DestinationPlan represents a planned destination for a volume/shard operation
type DestinationPlan struct {
TargetNode string `json:"target_node"`
TargetDisk uint32 `json:"target_disk"`
TargetRack string `json:"target_rack"`
TargetDC string `json:"target_dc"`
ExpectedSize uint64 `json:"expected_size"`
PlacementScore float64 `json:"placement_score"`
Conflicts []string `json:"conflicts"`
}
// MultiDestinationPlan represents multiple planned destinations for operations like EC
type MultiDestinationPlan struct {
Plans []*DestinationPlan `json:"plans"`
TotalShards int `json:"total_shards"`
SuccessfulRack int `json:"successful_racks"`
SuccessfulDCs int `json:"successful_dcs"`
}
// PlanBalanceDestination finds the best destination for a balance operation
func (at *ActiveTopology) PlanBalanceDestination(volumeID uint32, sourceNode string, sourceRack string, sourceDC string, volumeSize uint64) (*DestinationPlan, error) {
at.mutex.RLock()
defer at.mutex.RUnlock()
// Get available disks, excluding the source node
availableDisks := at.getAvailableDisksForPlanning(TaskTypeBalance, sourceNode)
if len(availableDisks) == 0 {
return nil, fmt.Errorf("no available disks for balance operation")
}
// Score each disk for balance placement
bestDisk := at.selectBestBalanceDestination(availableDisks, sourceRack, sourceDC, volumeSize)
if bestDisk == nil {
return nil, fmt.Errorf("no suitable destination found for balance operation")
}
return &DestinationPlan{
TargetNode: bestDisk.NodeID,
TargetDisk: bestDisk.DiskID,
TargetRack: bestDisk.Rack,
TargetDC: bestDisk.DataCenter,
ExpectedSize: volumeSize,
PlacementScore: at.calculatePlacementScore(bestDisk, sourceRack, sourceDC),
Conflicts: at.checkPlacementConflicts(bestDisk, TaskTypeBalance),
}, nil
}
// PlanECDestinations finds multiple destinations for EC shard distribution
func (at *ActiveTopology) PlanECDestinations(volumeID uint32, sourceNode string, sourceRack string, sourceDC string, shardsNeeded int) (*MultiDestinationPlan, error) {
at.mutex.RLock()
defer at.mutex.RUnlock()
// Get available disks for EC placement
availableDisks := at.getAvailableDisksForPlanning(TaskTypeErasureCoding, "")
if len(availableDisks) < shardsNeeded {
return nil, fmt.Errorf("insufficient disks for EC placement: need %d, have %d", shardsNeeded, len(availableDisks))
}
// Select best disks for EC placement with rack/DC diversity
selectedDisks := at.selectBestECDestinations(availableDisks, sourceRack, sourceDC, shardsNeeded)
if len(selectedDisks) < shardsNeeded {
return nil, fmt.Errorf("could not find %d suitable destinations for EC placement", shardsNeeded)
}
var plans []*DestinationPlan
rackCount := make(map[string]int)
dcCount := make(map[string]int)
for _, disk := range selectedDisks {
plan := &DestinationPlan{
TargetNode: disk.NodeID,
TargetDisk: disk.DiskID,
TargetRack: disk.Rack,
TargetDC: disk.DataCenter,
ExpectedSize: 0, // EC shards don't have predetermined size
PlacementScore: at.calculatePlacementScore(disk, sourceRack, sourceDC),
Conflicts: at.checkPlacementConflicts(disk, TaskTypeErasureCoding),
}
plans = append(plans, plan)
// Count rack and DC diversity
rackKey := fmt.Sprintf("%s:%s", disk.DataCenter, disk.Rack)
rackCount[rackKey]++
dcCount[disk.DataCenter]++
}
return &MultiDestinationPlan{
Plans: plans,
TotalShards: len(plans),
SuccessfulRack: len(rackCount),
SuccessfulDCs: len(dcCount),
}, nil
}
// getAvailableDisksForPlanning returns disks available for destination planning
func (at *ActiveTopology) getAvailableDisksForPlanning(taskType TaskType, excludeNodeID string) []*activeDisk {
var available []*activeDisk
for _, disk := range at.disks {
if excludeNodeID != "" && disk.NodeID == excludeNodeID {
continue // Skip excluded node
}
if at.isDiskAvailable(disk, taskType) {
available = append(available, disk)
}
}
return available
}
// selectBestBalanceDestination selects the best disk for balance operation
func (at *ActiveTopology) selectBestBalanceDestination(disks []*activeDisk, sourceRack string, sourceDC string, volumeSize uint64) *activeDisk {
if len(disks) == 0 {
return nil
}
var bestDisk *activeDisk
bestScore := -1.0
for _, disk := range disks {
score := at.calculateBalanceScore(disk, sourceRack, sourceDC, volumeSize)
if score > bestScore {
bestScore = score
bestDisk = disk
}
}
return bestDisk
}
// selectBestECDestinations selects multiple disks for EC shard placement with diversity
func (at *ActiveTopology) selectBestECDestinations(disks []*activeDisk, sourceRack string, sourceDC string, shardsNeeded int) []*activeDisk {
if len(disks) == 0 {
return nil
}
// Group disks by rack and DC for diversity
rackGroups := make(map[string][]*activeDisk)
for _, disk := range disks {
rackKey := fmt.Sprintf("%s:%s", disk.DataCenter, disk.Rack)
rackGroups[rackKey] = append(rackGroups[rackKey], disk)
}
var selected []*activeDisk
usedRacks := make(map[string]bool)
// First pass: select one disk from each rack for maximum diversity
for rackKey, rackDisks := range rackGroups {
if len(selected) >= shardsNeeded {
break
}
// Select best disk from this rack
bestDisk := at.selectBestFromRack(rackDisks, sourceRack, sourceDC)
if bestDisk != nil {
selected = append(selected, bestDisk)
usedRacks[rackKey] = true
}
}
// Second pass: if we need more disks, select from racks we've already used
if len(selected) < shardsNeeded {
for _, disk := range disks {
if len(selected) >= shardsNeeded {
break
}
// Skip if already selected
alreadySelected := false
for _, sel := range selected {
if sel.NodeID == disk.NodeID && sel.DiskID == disk.DiskID {
alreadySelected = true
break
}
}
if !alreadySelected && at.isDiskAvailable(disk, TaskTypeErasureCoding) {
selected = append(selected, disk)
}
}
}
return selected
}
// selectBestFromRack selects the best disk from a rack
func (at *ActiveTopology) selectBestFromRack(disks []*activeDisk, sourceRack string, sourceDC string) *activeDisk {
if len(disks) == 0 {
return nil
}
var bestDisk *activeDisk
bestScore := -1.0
for _, disk := range disks {
if !at.isDiskAvailable(disk, TaskTypeErasureCoding) {
continue
}
score := at.calculateECScore(disk, sourceRack, sourceDC)
if score > bestScore {
bestScore = score
bestDisk = disk
}
}
return bestDisk
}
// calculateBalanceScore calculates placement score for balance operations
func (at *ActiveTopology) calculateBalanceScore(disk *activeDisk, sourceRack string, sourceDC string, volumeSize uint64) float64 {
score := 0.0
// Prefer disks with lower load
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
score += (2.0 - float64(activeLoad)) * 40.0 // Max 80 points for load
// Prefer disks with more free space
if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
freeRatio := float64(disk.DiskInfo.DiskInfo.MaxVolumeCount-disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
score += freeRatio * 20.0 // Max 20 points for free space
}
// Rack diversity bonus (prefer different rack)
if disk.Rack != sourceRack {
score += 10.0
}
// DC diversity bonus (prefer different DC)
if disk.DataCenter != sourceDC {
score += 5.0
}
return score
}
// calculateECScore calculates placement score for EC operations
func (at *ActiveTopology) calculateECScore(disk *activeDisk, sourceRack string, sourceDC string) float64 {
score := 0.0
// Prefer disks with lower load
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
score += (2.0 - float64(activeLoad)) * 30.0 // Max 60 points for load
// Prefer disks with more free space
if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
freeRatio := float64(disk.DiskInfo.DiskInfo.MaxVolumeCount-disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
score += freeRatio * 20.0 // Max 20 points for free space
}
// Strong rack diversity preference for EC
if disk.Rack != sourceRack {
score += 20.0
}
// Strong DC diversity preference for EC
if disk.DataCenter != sourceDC {
score += 15.0
}
return score
}
// calculatePlacementScore calculates overall placement quality score
func (at *ActiveTopology) calculatePlacementScore(disk *activeDisk, sourceRack string, sourceDC string) float64 {
score := 0.0
// Load factor
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
loadScore := (2.0 - float64(activeLoad)) / 2.0 // Normalize to 0-1
score += loadScore * 0.4
// Capacity factor
if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
freeRatio := float64(disk.DiskInfo.DiskInfo.MaxVolumeCount-disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
score += freeRatio * 0.3
}
// Diversity factor
diversityScore := 0.0
if disk.Rack != sourceRack {
diversityScore += 0.5
}
if disk.DataCenter != sourceDC {
diversityScore += 0.5
}
score += diversityScore * 0.3
return score // Score between 0.0 and 1.0
}
// checkPlacementConflicts checks for placement rule violations
func (at *ActiveTopology) checkPlacementConflicts(disk *activeDisk, taskType TaskType) []string {
var conflicts []string
// Check load limits
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
if activeLoad >= 2 {
conflicts = append(conflicts, fmt.Sprintf("disk_load_high_%d", activeLoad))
}
// Check capacity limits
if disk.DiskInfo.DiskInfo.MaxVolumeCount > 0 {
usageRatio := float64(disk.DiskInfo.DiskInfo.VolumeCount) / float64(disk.DiskInfo.DiskInfo.MaxVolumeCount)
if usageRatio > 0.9 {
conflicts = append(conflicts, "disk_capacity_high")
}
}
// Check for conflicting task types
for _, task := range disk.assignedTasks {
if at.areTaskTypesConflicting(task.TaskType, taskType) {
conflicts = append(conflicts, fmt.Sprintf("task_conflict_%s", task.TaskType))
}
}
return conflicts
}
// Private methods
// reassignTaskStates assigns tasks to the appropriate disks
func (at *ActiveTopology) reassignTaskStates() {
// Clear existing task assignments
for _, disk := range at.disks {
disk.pendingTasks = nil
disk.assignedTasks = nil
disk.recentTasks = nil
}
// Reassign pending tasks
for _, task := range at.pendingTasks {
at.assignTaskToDisk(task)
}
// Reassign assigned tasks
for _, task := range at.assignedTasks {
at.assignTaskToDisk(task)
}
// Reassign recent tasks
for _, task := range at.recentTasks {
at.assignTaskToDisk(task)
}
}
// assignTaskToDisk assigns a task to the appropriate disk(s)
func (at *ActiveTopology) assignTaskToDisk(task *taskState) {
// Assign to source disk
sourceKey := fmt.Sprintf("%s:%d", task.SourceServer, task.SourceDisk)
if sourceDisk, exists := at.disks[sourceKey]; exists {
switch task.Status {
case TaskStatusPending:
sourceDisk.pendingTasks = append(sourceDisk.pendingTasks, task)
case TaskStatusInProgress:
sourceDisk.assignedTasks = append(sourceDisk.assignedTasks, task)
case TaskStatusCompleted:
sourceDisk.recentTasks = append(sourceDisk.recentTasks, task)
}
}
// Assign to target disk if it exists and is different from source
if task.TargetServer != "" && (task.TargetServer != task.SourceServer || task.TargetDisk != task.SourceDisk) {
targetKey := fmt.Sprintf("%s:%d", task.TargetServer, task.TargetDisk)
if targetDisk, exists := at.disks[targetKey]; exists {
switch task.Status {
case TaskStatusPending:
targetDisk.pendingTasks = append(targetDisk.pendingTasks, task)
case TaskStatusInProgress:
targetDisk.assignedTasks = append(targetDisk.assignedTasks, task)
case TaskStatusCompleted:
targetDisk.recentTasks = append(targetDisk.recentTasks, task)
}
}
}
}
// isDiskAvailable checks if a disk can accept new tasks
func (at *ActiveTopology) isDiskAvailable(disk *activeDisk, taskType TaskType) bool {
// Check if disk has too many active tasks
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
if activeLoad >= 2 { // Max 2 concurrent tasks per disk
return false
}
// Check for conflicting task types
for _, task := range disk.assignedTasks {
if at.areTaskTypesConflicting(task.TaskType, taskType) {
return false
}
}
return true
}
// areTaskTypesConflicting checks if two task types conflict
func (at *ActiveTopology) areTaskTypesConflicting(existing, new TaskType) bool {
// Examples of conflicting task types
conflictMap := map[TaskType][]TaskType{
TaskTypeVacuum: {TaskTypeBalance, TaskTypeErasureCoding},
TaskTypeBalance: {TaskTypeVacuum, TaskTypeErasureCoding},
TaskTypeErasureCoding: {TaskTypeVacuum, TaskTypeBalance},
}
if conflicts, exists := conflictMap[existing]; exists {
for _, conflictType := range conflicts {
if conflictType == new {
return true
}
}
}
return false
}
// cleanupRecentTasks removes old recent tasks
func (at *ActiveTopology) cleanupRecentTasks() {
cutoff := time.Now().Add(-time.Duration(at.recentTaskWindowSeconds) * time.Second)
for taskID, task := range at.recentTasks {
if task.CompletedAt.Before(cutoff) {
delete(at.recentTasks, taskID)
}
}
}


@ -1,15 +1,25 @@
package topology
import (
+	"fmt"
	"testing"
	"time"
-	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
+// Helper function to find a disk by ID for testing - reduces code duplication
+func findDiskByID(disks []*DiskInfo, diskID uint32) *DiskInfo {
+	for _, disk := range disks {
+		if disk.DiskID == diskID {
+			return disk
+		}
+	}
+	return nil
+}
// TestActiveTopologyBasicOperations tests basic topology management
func TestActiveTopologyBasicOperations(t *testing.T) {
	topology := NewActiveTopology(10)
@@ -59,8 +69,19 @@ func TestTaskLifecycle(t *testing.T) {
	taskID := "balance-001"
	// 1. Add pending task
-	topology.AddPendingTask(taskID, TaskTypeBalance, 1001,
-		"10.0.0.1:8080", 0, "10.0.0.2:8080", 1)
+	err := topology.AddPendingTask(TaskSpec{
+		TaskID:     taskID,
+		TaskType:   TaskTypeBalance,
+		VolumeID:   1001,
+		VolumeSize: 1024 * 1024 * 1024,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 0},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "10.0.0.2:8080", DiskID: 1},
+		},
+	})
+	assert.NoError(t, err, "Should add pending task successfully")
	// Verify pending state
	assert.Equal(t, 1, len(topology.pendingTasks))
@@ -78,7 +99,7 @@ func TestTaskLifecycle(t *testing.T) {
	assert.Equal(t, 1, len(targetDisk.pendingTasks))
	// 2. Assign task
-	err := topology.AssignTask(taskID)
+	err = topology.AssignTask(taskID)
	require.NoError(t, err)
	// Verify assigned state
@@ -259,8 +280,7 @@ func TestTargetSelectionScenarios(t *testing.T) {
			assert.NotEqual(t, tt.excludeNode, disk.NodeID,
				"Available disk should not be on excluded node")
-			load := tt.topology.GetDiskLoad(disk.NodeID, disk.DiskID)
-			assert.Less(t, load, 2, "Disk load should be less than 2")
+			assert.Less(t, disk.LoadCount, 2, "Disk load should be less than 2")
		}
	})
}
@@ -272,37 +292,65 @@ func TestDiskLoadCalculation(t *testing.T) {
	topology.UpdateTopology(createSampleTopology())
	// Initially no load
-	load := topology.GetDiskLoad("10.0.0.1:8080", 0)
-	assert.Equal(t, 0, load)
+	disks := topology.GetNodeDisks("10.0.0.1:8080")
+	targetDisk := findDiskByID(disks, 0)
+	require.NotNil(t, targetDisk, "Should find disk with ID 0")
+	assert.Equal(t, 0, targetDisk.LoadCount)
	// Add pending task
-	topology.AddPendingTask("task1", TaskTypeBalance, 1001,
-		"10.0.0.1:8080", 0, "10.0.0.2:8080", 1)
+	err := topology.AddPendingTask(TaskSpec{
+		TaskID:     "task1",
+		TaskType:   TaskTypeBalance,
+		VolumeID:   1001,
+		VolumeSize: 1024 * 1024 * 1024,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 0},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "10.0.0.2:8080", DiskID: 1},
+		},
+	})
+	assert.NoError(t, err, "Should add pending task successfully")
	// Check load increased
-	load = topology.GetDiskLoad("10.0.0.1:8080", 0)
-	assert.Equal(t, 1, load)
+	disks = topology.GetNodeDisks("10.0.0.1:8080")
+	targetDisk = findDiskByID(disks, 0)
+	assert.Equal(t, 1, targetDisk.LoadCount)
	// Add another task to same disk
-	topology.AddPendingTask("task2", TaskTypeVacuum, 1002,
-		"10.0.0.1:8080", 0, "", 0)
+	err = topology.AddPendingTask(TaskSpec{
+		TaskID:     "task2",
+		TaskType:   TaskTypeVacuum,
+		VolumeID:   1002,
+		VolumeSize: 0,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 0},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "", DiskID: 0}, // Vacuum doesn't have a destination
+		},
+	})
+	assert.NoError(t, err, "Should add vacuum task successfully")
-	load = topology.GetDiskLoad("10.0.0.1:8080", 0)
-	assert.Equal(t, 2, load)
+	disks = topology.GetNodeDisks("10.0.0.1:8080")
+	targetDisk = findDiskByID(disks, 0)
+	assert.Equal(t, 2, targetDisk.LoadCount)
	// Move one task to assigned
	topology.AssignTask("task1")
	// Load should still be 2 (1 pending + 1 assigned)
-	load = topology.GetDiskLoad("10.0.0.1:8080", 0)
-	assert.Equal(t, 2, load)
+	disks = topology.GetNodeDisks("10.0.0.1:8080")
+	targetDisk = findDiskByID(disks, 0)
+	assert.Equal(t, 2, targetDisk.LoadCount)
	// Complete one task
	topology.CompleteTask("task1")
	// Load should decrease to 1
-	load = topology.GetDiskLoad("10.0.0.1:8080", 0)
-	assert.Equal(t, 1, load)
+	disks = topology.GetNodeDisks("10.0.0.1:8080")
+	targetDisk = findDiskByID(disks, 0)
+	assert.Equal(t, 1, targetDisk.LoadCount)
}
// TestTaskConflictDetection tests task conflict detection
@@ -311,8 +359,19 @@ func TestTaskConflictDetection(t *testing.T) {
	topology.UpdateTopology(createSampleTopology())
	// Add a balance task
-	topology.AddPendingTask("balance1", TaskTypeBalance, 1001,
-		"10.0.0.1:8080", 0, "10.0.0.2:8080", 1)
+	err := topology.AddPendingTask(TaskSpec{
+		TaskID:     "balance1",
+		TaskType:   TaskTypeBalance,
+		VolumeID:   1001,
+		VolumeSize: 1024 * 1024 * 1024,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 0},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "10.0.0.2:8080", DiskID: 1},
+		},
+	})
+	assert.NoError(t, err, "Should add balance task successfully")
	topology.AssignTask("balance1")
	// Try to get available disks for vacuum (conflicts with balance)
@@ -449,8 +508,22 @@ func createTopologyWithLoad() *ActiveTopology {
	topology.UpdateTopology(createSampleTopology())
	// Add some existing tasks to create load
-	topology.AddPendingTask("existing1", TaskTypeVacuum, 2001,
-		"10.0.0.1:8080", 0, "", 0)
+	err := topology.AddPendingTask(TaskSpec{
+		TaskID:     "existing1",
+		TaskType:   TaskTypeVacuum,
+		VolumeID:   2001,
+		VolumeSize: 0,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 0},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "", DiskID: 0}, // Vacuum doesn't have a destination
+		},
+	})
+	if err != nil {
+		// In test helper function, just log error instead of failing
+		fmt.Printf("Warning: Failed to add existing task: %v\n", err)
+	}
	topology.AssignTask("existing1")
	return topology
@@ -467,188 +540,68 @@ func createTopologyWithConflicts() *ActiveTopology {
	topology.UpdateTopology(createSampleTopology())
	// Add conflicting tasks
-	topology.AddPendingTask("balance1", TaskTypeBalance, 3001,
-		"10.0.0.1:8080", 0, "10.0.0.2:8080", 0)
+	err := topology.AddPendingTask(TaskSpec{
+		TaskID:     "balance1",
+		TaskType:   TaskTypeBalance,
+		VolumeID:   3001,
+		VolumeSize: 1024 * 1024 * 1024,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 0},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "10.0.0.2:8080", DiskID: 0},
+		},
+	})
+	if err != nil {
+		fmt.Printf("Warning: Failed to add balance task: %v\n", err)
+	}
	topology.AssignTask("balance1")
-	topology.AddPendingTask("ec1", TaskTypeErasureCoding, 3002,
-		"10.0.0.1:8080", 1, "", 0)
+	err = topology.AddPendingTask(TaskSpec{
+		TaskID:     "ec1",
+		TaskType:   TaskTypeErasureCoding,
+		VolumeID:   3002,
+		VolumeSize: 1024 * 1024 * 1024,
+		Sources: []TaskSourceSpec{
+			{ServerID: "10.0.0.1:8080", DiskID: 1},
+		},
+		Destinations: []TaskDestinationSpec{
+			{ServerID: "", DiskID: 0}, // EC doesn't have single destination
+		},
+	})
+	if err != nil {
+		fmt.Printf("Warning: Failed to add EC task: %v\n", err)
+	}
	topology.AssignTask("ec1")
	return topology
}
// TestDestinationPlanning tests destination planning functionality // TestDestinationPlanning tests that the public interface works correctly
// NOTE: Destination planning is now done in task detection phase, not in ActiveTopology
func TestDestinationPlanning(t *testing.T) { func TestDestinationPlanning(t *testing.T) {
topology := NewActiveTopology(10) topology := NewActiveTopology(10)
topology.UpdateTopology(createSampleTopology()) topology.UpdateTopology(createSampleTopology())
// Test balance destination planning // Test that GetAvailableDisks works for destination planning
t.Run("Balance destination planning", func(t *testing.T) { t.Run("GetAvailableDisks functionality", func(t *testing.T) {
plan, err := topology.PlanBalanceDestination(1001, "10.0.0.1:8080", "rack1", "dc1", 1024*1024) // 1MB availableDisks := topology.GetAvailableDisks(TaskTypeBalance, "10.0.0.1:8080")
require.NoError(t, err) assert.Greater(t, len(availableDisks), 0)
require.NotNil(t, plan)
// Should not target the source node // Should exclude the source node
assert.NotEqual(t, "10.0.0.1:8080", plan.TargetNode) for _, disk := range availableDisks {
assert.Equal(t, "10.0.0.2:8080", plan.TargetNode) assert.NotEqual(t, "10.0.0.1:8080", disk.NodeID)
assert.NotEmpty(t, plan.TargetRack)
assert.NotEmpty(t, plan.TargetDC)
assert.Greater(t, plan.PlacementScore, 0.0)
})
// Test EC destination planning
t.Run("EC destination planning", func(t *testing.T) {
multiPlan, err := topology.PlanECDestinations(1002, "10.0.0.1:8080", "rack1", "dc1", 3) // Ask for 3 shards - source node can be included
require.NoError(t, err)
require.NotNil(t, multiPlan)
assert.Greater(t, len(multiPlan.Plans), 0)
assert.LessOrEqual(t, len(multiPlan.Plans), 3) // Should get at most 3 shards
assert.Equal(t, len(multiPlan.Plans), multiPlan.TotalShards)
// Check that all plans have valid target nodes
for _, plan := range multiPlan.Plans {
assert.NotEmpty(t, plan.TargetNode)
assert.NotEmpty(t, plan.TargetRack)
assert.NotEmpty(t, plan.TargetDC)
assert.GreaterOrEqual(t, plan.PlacementScore, 0.0)
} }
// Check diversity metrics
assert.GreaterOrEqual(t, multiPlan.SuccessfulRack, 1)
assert.GreaterOrEqual(t, multiPlan.SuccessfulDCs, 1)
}) })
// Test destination planning with load // Test that topology state can be used for planning
t.Run("Destination planning considers load", func(t *testing.T) { t.Run("Topology provides planning information", func(t *testing.T) {
// Add load to one disk topologyInfo := topology.GetTopologyInfo()
topology.AddPendingTask("task1", TaskTypeBalance, 2001, assert.NotNil(t, topologyInfo)
"10.0.0.2:8080", 0, "", 0) assert.Greater(t, len(topologyInfo.DataCenterInfos), 0)
plan, err := topology.PlanBalanceDestination(1003, "10.0.0.1:8080", "rack1", "dc1", 1024*1024) // Test getting node disks
require.NoError(t, err) disks := topology.GetNodeDisks("10.0.0.1:8080")
require.NotNil(t, plan) assert.Greater(t, len(disks), 0)
// Should prefer less loaded disk (disk 1 over disk 0 on node2)
assert.Equal(t, "10.0.0.2:8080", plan.TargetNode)
assert.Equal(t, uint32(1), plan.TargetDisk) // Should prefer SSD (disk 1) which has no load
})
// Test insufficient destinations
t.Run("Handle insufficient destinations", func(t *testing.T) {
// Try to plan for more EC shards than available disks
multiPlan, err := topology.PlanECDestinations(1004, "10.0.0.1:8080", "rack1", "dc1", 100)
// Should get an error for insufficient disks
assert.Error(t, err)
assert.Nil(t, multiPlan)
})
}
// TestDestinationPlanningWithActiveTopology tests the integration between task detection and destination planning
func TestDestinationPlanningWithActiveTopology(t *testing.T) {
topology := NewActiveTopology(10)
topology.UpdateTopology(createUnbalancedTopology())
// Test that tasks are created with destinations
t.Run("Balance task with destination", func(t *testing.T) {
// Simulate what the balance detector would create
sourceNode := "10.0.0.1:8080" // Overloaded node
volumeID := uint32(1001)
plan, err := topology.PlanBalanceDestination(volumeID, sourceNode, "rack1", "dc1", 1024*1024)
require.NoError(t, err)
require.NotNil(t, plan)
// Verify the destination is different from source
assert.NotEqual(t, sourceNode, plan.TargetNode)
assert.Equal(t, "10.0.0.2:8080", plan.TargetNode) // Should be the lightly loaded node
// Verify placement quality
assert.Greater(t, plan.PlacementScore, 0.0)
assert.LessOrEqual(t, plan.PlacementScore, 1.0)
})
// Test task state integration
t.Run("Task state affects future planning", func(t *testing.T) {
volumeID := uint32(1002)
sourceNode := "10.0.0.1:8080"
targetNode := "10.0.0.2:8080"
// Plan first destination
plan1, err := topology.PlanBalanceDestination(volumeID, sourceNode, "rack1", "dc1", 1024*1024)
require.NoError(t, err)
require.NotNil(t, plan1)
// Add a pending task to the target
topology.AddPendingTask("task1", TaskTypeBalance, volumeID, sourceNode, 0, targetNode, 0)
// Plan another destination - should consider the pending task load
plan2, err := topology.PlanBalanceDestination(1003, sourceNode, "rack1", "dc1", 1024*1024)
require.NoError(t, err)
require.NotNil(t, plan2)
// The placement score should reflect the increased load
// (This test might need adjustment based on the actual scoring algorithm)
glog.V(1).Infof("Plan1 score: %.3f, Plan2 score: %.3f", plan1.PlacementScore, plan2.PlacementScore)
})
}
// TestECDestinationPlanningDetailed tests the EC destination planning with multiple shards
func TestECDestinationPlanningDetailed(t *testing.T) {
topology := NewActiveTopology(10)
topology.UpdateTopology(createSampleTopology())
t.Run("EC multiple destinations", func(t *testing.T) {
// Plan for 3 EC shards (now including source node, we have 4 disks total)
multiPlan, err := topology.PlanECDestinations(1005, "10.0.0.1:8080", "rack1", "dc1", 3)
require.NoError(t, err)
require.NotNil(t, multiPlan)
// Should get 3 destinations (can include source node's disks)
assert.Equal(t, 3, len(multiPlan.Plans))
assert.Equal(t, 3, multiPlan.TotalShards)
// Count node distribution - source node can now be included
nodeCount := make(map[string]int)
for _, plan := range multiPlan.Plans {
nodeCount[plan.TargetNode]++
}
// Should distribute across available nodes (both nodes can be used)
assert.GreaterOrEqual(t, len(nodeCount), 1, "Should use at least 1 node")
assert.LessOrEqual(t, len(nodeCount), 2, "Should use at most 2 nodes")
glog.V(1).Infof("EC destinations node distribution: %v", nodeCount)
glog.V(1).Infof("EC destinations: %d plans across %d racks, %d DCs",
multiPlan.TotalShards, multiPlan.SuccessfulRack, multiPlan.SuccessfulDCs)
})
t.Run("EC destination planning with task conflicts", func(t *testing.T) {
// Create a fresh topology for this test to avoid conflicts from previous test
freshTopology := NewActiveTopology(10)
freshTopology.UpdateTopology(createSampleTopology())
// Add tasks to create conflicts on some disks
freshTopology.AddPendingTask("conflict1", TaskTypeVacuum, 2001, "10.0.0.2:8080", 0, "", 0)
freshTopology.AddPendingTask("conflict2", TaskTypeBalance, 2002, "10.0.0.1:8080", 0, "", 0)
freshTopology.AssignTask("conflict1")
freshTopology.AssignTask("conflict2")
// Plan EC destinations - should still succeed using available disks
multiPlan, err := freshTopology.PlanECDestinations(1006, "10.0.0.1:8080", "rack1", "dc1", 2)
require.NoError(t, err)
require.NotNil(t, multiPlan)
// Should get destinations (using disks that don't have conflicts)
assert.GreaterOrEqual(t, len(multiPlan.Plans), 1)
assert.LessOrEqual(t, len(multiPlan.Plans), 2)
// Available disks should be: node1/disk1 and node2/disk1 (since disk0 on both nodes have conflicts)
for _, plan := range multiPlan.Plans {
assert.Equal(t, uint32(1), plan.TargetDisk, "Should prefer disk 1 which has no conflicts")
}
glog.V(1).Infof("EC destination planning with conflicts: found %d destinations", len(multiPlan.Plans))
}) })
} }
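Since destination planning now happens in the detection phase, a detector only needs the public accessors exercised above. A hedged sketch of what such a selection could look like; pickLeastLoadedTarget is illustrative, not an existing function, and relies only on GetAvailableDisks and DiskInfo.LoadCount.

// pickLeastLoadedTarget scans the disks that ActiveTopology considers
// available for a balance task and returns the one with the fewest
// pending+assigned tasks, or nil if none qualify.
func pickLeastLoadedTarget(at *ActiveTopology, sourceNode string) *DiskInfo {
	var best *DiskInfo
	for _, disk := range at.GetAvailableDisks(TaskTypeBalance, sourceNode) {
		if best == nil || disk.LoadCount < best.LoadCount {
			best = disk
		}
	}
	return best
}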


@ -0,0 +1,300 @@
package topology
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// GetEffectiveAvailableCapacity returns the effective available capacity for a disk
// This considers BOTH pending and assigned tasks for capacity reservation.
//
// Formula: BaseAvailable - (VolumeSlots + ShardSlots/ShardsPerVolumeSlot) from all tasks
//
// The calculation includes:
// - Pending tasks: Reserve capacity immediately when added
// - Assigned tasks: Continue to reserve capacity during execution
// - Recently completed tasks are NOT counted against capacity
func (at *ActiveTopology) GetEffectiveAvailableCapacity(nodeID string, diskID uint32) int64 {
at.mutex.RLock()
defer at.mutex.RUnlock()
diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
disk, exists := at.disks[diskKey]
if !exists {
return 0
}
if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil {
return 0
}
// Use the same logic as getEffectiveAvailableCapacityUnsafe but with locking
capacity := at.getEffectiveAvailableCapacityUnsafe(disk)
return int64(capacity.VolumeSlots)
}
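To make the formula concrete with invented numbers: a disk with MaxVolumeCount=100 and VolumeCount=90 starts with 10 free volume slots; one pending balance destination reserves +1 volume slot, so the effective availability reported here drops to 9. The caller below is hypothetical and only exists to show the call shape.

// exampleEffectiveCapacity is an illustrative caller: with the numbers above
// it would print "effective volume slots: 9".
func exampleEffectiveCapacity(at *ActiveTopology) {
	fmt.Printf("effective volume slots: %d\n", at.GetEffectiveAvailableCapacity("10.0.0.2:8080", 1))
}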
// GetEffectiveAvailableCapacityDetailed returns detailed available capacity as StorageSlotChange
// This provides granular information about available volume slots and shard slots
func (at *ActiveTopology) GetEffectiveAvailableCapacityDetailed(nodeID string, diskID uint32) StorageSlotChange {
at.mutex.RLock()
defer at.mutex.RUnlock()
diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
disk, exists := at.disks[diskKey]
if !exists {
return StorageSlotChange{}
}
if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil {
return StorageSlotChange{}
}
return at.getEffectiveAvailableCapacityUnsafe(disk)
}
// GetEffectiveCapacityImpact returns the StorageSlotChange impact for a disk
// This shows the net impact from all pending and assigned tasks
func (at *ActiveTopology) GetEffectiveCapacityImpact(nodeID string, diskID uint32) StorageSlotChange {
at.mutex.RLock()
defer at.mutex.RUnlock()
diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
disk, exists := at.disks[diskKey]
if !exists {
return StorageSlotChange{}
}
return at.getEffectiveCapacityUnsafe(disk)
}
// GetDisksWithEffectiveCapacity returns disks with sufficient effective capacity
// This method considers BOTH pending and assigned tasks for capacity reservation using StorageSlotChange.
//
// Parameters:
// - taskType: type of task to check compatibility for
// - excludeNodeID: node to exclude from results
// - minCapacity: minimum effective capacity required (in volume slots)
//
// Returns: DiskInfo objects where VolumeCount reflects capacity reserved by all tasks
func (at *ActiveTopology) GetDisksWithEffectiveCapacity(taskType TaskType, excludeNodeID string, minCapacity int64) []*DiskInfo {
at.mutex.RLock()
defer at.mutex.RUnlock()
var available []*DiskInfo
for _, disk := range at.disks {
if disk.NodeID == excludeNodeID {
continue // Skip excluded node
}
if at.isDiskAvailable(disk, taskType) {
effectiveCapacity := at.getEffectiveAvailableCapacityUnsafe(disk)
// Only include disks that meet minimum capacity requirement
if int64(effectiveCapacity.VolumeSlots) >= minCapacity {
// Create a new DiskInfo with current capacity information
diskCopy := DiskInfo{
NodeID: disk.DiskInfo.NodeID,
DiskID: disk.DiskInfo.DiskID,
DiskType: disk.DiskInfo.DiskType,
DataCenter: disk.DiskInfo.DataCenter,
Rack: disk.DiskInfo.Rack,
LoadCount: len(disk.pendingTasks) + len(disk.assignedTasks), // Count all tasks
}
// Create a new protobuf DiskInfo to avoid modifying the original
diskInfoCopy := &master_pb.DiskInfo{
DiskId: disk.DiskInfo.DiskInfo.DiskId,
MaxVolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount,
VolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount - int64(effectiveCapacity.VolumeSlots),
VolumeInfos: disk.DiskInfo.DiskInfo.VolumeInfos,
EcShardInfos: disk.DiskInfo.DiskInfo.EcShardInfos,
RemoteVolumeCount: disk.DiskInfo.DiskInfo.RemoteVolumeCount,
ActiveVolumeCount: disk.DiskInfo.DiskInfo.ActiveVolumeCount,
FreeVolumeCount: disk.DiskInfo.DiskInfo.FreeVolumeCount,
}
diskCopy.DiskInfo = diskInfoCopy
available = append(available, &diskCopy)
}
}
}
return available
}
// GetDisksForPlanning returns disks considering both active and pending tasks for planning decisions
// This helps avoid over-scheduling tasks to the same disk
func (at *ActiveTopology) GetDisksForPlanning(taskType TaskType, excludeNodeID string, minCapacity int64) []*DiskInfo {
at.mutex.RLock()
defer at.mutex.RUnlock()
var available []*DiskInfo
for _, disk := range at.disks {
if disk.NodeID == excludeNodeID {
continue // Skip excluded node
}
// Consider both pending and active tasks for scheduling decisions
if at.isDiskAvailableForPlanning(disk, taskType) {
// Check if disk can accommodate new task considering pending tasks
planningCapacity := at.getPlanningCapacityUnsafe(disk)
if int64(planningCapacity.VolumeSlots) >= minCapacity {
// Create a new DiskInfo with planning information
diskCopy := DiskInfo{
NodeID: disk.DiskInfo.NodeID,
DiskID: disk.DiskInfo.DiskID,
DiskType: disk.DiskInfo.DiskType,
DataCenter: disk.DiskInfo.DataCenter,
Rack: disk.DiskInfo.Rack,
LoadCount: len(disk.pendingTasks) + len(disk.assignedTasks),
}
// Create a new protobuf DiskInfo to avoid modifying the original
diskInfoCopy := &master_pb.DiskInfo{
DiskId: disk.DiskInfo.DiskInfo.DiskId,
MaxVolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount,
VolumeCount: disk.DiskInfo.DiskInfo.MaxVolumeCount - int64(planningCapacity.VolumeSlots),
VolumeInfos: disk.DiskInfo.DiskInfo.VolumeInfos,
EcShardInfos: disk.DiskInfo.DiskInfo.EcShardInfos,
RemoteVolumeCount: disk.DiskInfo.DiskInfo.RemoteVolumeCount,
ActiveVolumeCount: disk.DiskInfo.DiskInfo.ActiveVolumeCount,
FreeVolumeCount: disk.DiskInfo.DiskInfo.FreeVolumeCount,
}
diskCopy.DiskInfo = diskInfoCopy
available = append(available, &diskCopy)
}
}
}
return available
}
// CanAccommodateTask checks if a disk can accommodate a new task considering all constraints
func (at *ActiveTopology) CanAccommodateTask(nodeID string, diskID uint32, taskType TaskType, volumesNeeded int64) bool {
at.mutex.RLock()
defer at.mutex.RUnlock()
diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
disk, exists := at.disks[diskKey]
if !exists {
return false
}
// Check basic availability
if !at.isDiskAvailable(disk, taskType) {
return false
}
// Check effective capacity
effectiveCapacity := at.getEffectiveAvailableCapacityUnsafe(disk)
return int64(effectiveCapacity.VolumeSlots) >= volumesNeeded
}
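A sketch of how a scheduler might gate task creation on this check; canScheduleBalance is illustrative and relies only on CanAccommodateTask as defined above.

// canScheduleBalance reports whether the destination disk can still absorb
// one more volume slot once all pending and assigned reservations are counted.
func canScheduleBalance(at *ActiveTopology, targetServer string, targetDisk uint32) bool {
	return at.CanAccommodateTask(targetServer, targetDisk, TaskTypeBalance, 1)
}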
// getPlanningCapacityUnsafe considers both pending and active tasks for planning
func (at *ActiveTopology) getPlanningCapacityUnsafe(disk *activeDisk) StorageSlotChange {
if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil {
return StorageSlotChange{}
}
baseAvailableVolumes := disk.DiskInfo.DiskInfo.MaxVolumeCount - disk.DiskInfo.DiskInfo.VolumeCount
// Use the centralized helper function to calculate task storage impact
totalImpact := at.calculateTaskStorageImpact(disk)
// Calculate available capacity considering impact (negative impact reduces availability)
availableVolumeSlots := baseAvailableVolumes - totalImpact.ToVolumeSlots()
if availableVolumeSlots < 0 {
availableVolumeSlots = 0
}
// Return detailed capacity information
return StorageSlotChange{
VolumeSlots: int32(availableVolumeSlots),
ShardSlots: -totalImpact.ShardSlots, // Available shard capacity (negative impact becomes positive availability)
}
}
// isDiskAvailableForPlanning checks if disk can accept new tasks considering pending load
func (at *ActiveTopology) isDiskAvailableForPlanning(disk *activeDisk, taskType TaskType) bool {
// Check total load including pending tasks
totalLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
if totalLoad >= MaxTotalTaskLoadPerDisk {
return false
}
// Check for conflicting task types in active tasks only
for _, task := range disk.assignedTasks {
if at.areTaskTypesConflicting(task.TaskType, taskType) {
return false
}
}
return true
}
// calculateTaskStorageImpact is a helper function that calculates the total storage impact
// from all tasks (pending and assigned) on a given disk. This eliminates code duplication
// between multiple capacity calculation functions.
func (at *ActiveTopology) calculateTaskStorageImpact(disk *activeDisk) StorageSlotChange {
if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil {
return StorageSlotChange{}
}
totalImpact := StorageSlotChange{}
// Process both pending and assigned tasks with identical logic
taskLists := [][]*taskState{disk.pendingTasks, disk.assignedTasks}
for _, taskList := range taskLists {
for _, task := range taskList {
// Calculate impact for all source locations
for _, source := range task.Sources {
if source.SourceServer == disk.NodeID && source.SourceDisk == disk.DiskID {
totalImpact.AddInPlace(source.StorageChange)
}
}
// Calculate impact for all destination locations
for _, dest := range task.Destinations {
if dest.TargetServer == disk.NodeID && dest.TargetDisk == disk.DiskID {
totalImpact.AddInPlace(dest.StorageChange)
}
}
}
}
return totalImpact
}
// getEffectiveCapacityUnsafe returns effective capacity impact without locking (for internal use)
// Returns StorageSlotChange representing the net impact from all tasks
func (at *ActiveTopology) getEffectiveCapacityUnsafe(disk *activeDisk) StorageSlotChange {
return at.calculateTaskStorageImpact(disk)
}
// getEffectiveAvailableCapacityUnsafe returns detailed available capacity as StorageSlotChange
func (at *ActiveTopology) getEffectiveAvailableCapacityUnsafe(disk *activeDisk) StorageSlotChange {
if disk.DiskInfo == nil || disk.DiskInfo.DiskInfo == nil {
return StorageSlotChange{}
}
baseAvailable := disk.DiskInfo.DiskInfo.MaxVolumeCount - disk.DiskInfo.DiskInfo.VolumeCount
netImpact := at.getEffectiveCapacityUnsafe(disk)
// Calculate available volume slots (negative impact reduces availability)
availableVolumeSlots := baseAvailable - netImpact.ToVolumeSlots()
if availableVolumeSlots < 0 {
availableVolumeSlots = 0
}
// Return detailed capacity information
return StorageSlotChange{
VolumeSlots: int32(availableVolumeSlots),
ShardSlots: -netImpact.ShardSlots, // Available shard capacity (negative impact becomes positive availability)
}
}


@ -0,0 +1,114 @@
package topology
import (
"fmt"
"time"
)
// reassignTaskStates assigns tasks to the appropriate disks
func (at *ActiveTopology) reassignTaskStates() {
// Clear existing task assignments
for _, disk := range at.disks {
disk.pendingTasks = nil
disk.assignedTasks = nil
disk.recentTasks = nil
}
// Reassign pending tasks
for _, task := range at.pendingTasks {
at.assignTaskToDisk(task)
}
// Reassign assigned tasks
for _, task := range at.assignedTasks {
at.assignTaskToDisk(task)
}
// Reassign recent tasks
for _, task := range at.recentTasks {
at.assignTaskToDisk(task)
}
}
// assignTaskToDisk assigns a task to the appropriate disk(s)
func (at *ActiveTopology) assignTaskToDisk(task *taskState) {
addedDisks := make(map[string]bool)
// Local helper function to assign task to a disk and avoid code duplication
assign := func(server string, diskID uint32) {
key := fmt.Sprintf("%s:%d", server, diskID)
if server == "" || addedDisks[key] {
return
}
if disk, exists := at.disks[key]; exists {
switch task.Status {
case TaskStatusPending:
disk.pendingTasks = append(disk.pendingTasks, task)
case TaskStatusInProgress:
disk.assignedTasks = append(disk.assignedTasks, task)
case TaskStatusCompleted:
disk.recentTasks = append(disk.recentTasks, task)
}
addedDisks[key] = true
}
}
// Assign to all source disks
for _, source := range task.Sources {
assign(source.SourceServer, source.SourceDisk)
}
// Assign to all destination disks (duplicates automatically avoided by helper)
for _, dest := range task.Destinations {
assign(dest.TargetServer, dest.TargetDisk)
}
}
// isDiskAvailable checks if a disk can accept new tasks
func (at *ActiveTopology) isDiskAvailable(disk *activeDisk, taskType TaskType) bool {
// Check if disk has too many pending and active tasks
activeLoad := len(disk.pendingTasks) + len(disk.assignedTasks)
if activeLoad >= MaxConcurrentTasksPerDisk {
return false
}
// Check for conflicting task types
for _, task := range disk.assignedTasks {
if at.areTaskTypesConflicting(task.TaskType, taskType) {
return false
}
}
return true
}
// areTaskTypesConflicting checks if two task types conflict
func (at *ActiveTopology) areTaskTypesConflicting(existing, new TaskType) bool {
// Examples of conflicting task types
conflictMap := map[TaskType][]TaskType{
TaskTypeVacuum: {TaskTypeBalance, TaskTypeErasureCoding},
TaskTypeBalance: {TaskTypeVacuum, TaskTypeErasureCoding},
TaskTypeErasureCoding: {TaskTypeVacuum, TaskTypeBalance},
}
if conflicts, exists := conflictMap[existing]; exists {
for _, conflictType := range conflicts {
if conflictType == new {
return true
}
}
}
return false
}
// cleanupRecentTasks removes old recent tasks
func (at *ActiveTopology) cleanupRecentTasks() {
cutoff := time.Now().Add(-time.Duration(at.recentTaskWindowSeconds) * time.Second)
for taskID, task := range at.recentTasks {
if task.CompletedAt.Before(cutoff) {
delete(at.recentTasks, taskID)
}
}
}


@ -0,0 +1,50 @@
package topology
import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/storage/erasure_coding"
)
// CalculateTaskStorageImpact calculates storage impact for different task types
func CalculateTaskStorageImpact(taskType TaskType, volumeSize int64) (sourceChange, targetChange StorageSlotChange) {
switch taskType {
case TaskTypeErasureCoding:
// EC task: distributes shards to MULTIPLE targets, source reserves with zero impact
// Source reserves capacity but with zero StorageSlotChange (no actual capacity consumption during planning)
// WARNING: EC has multiple targets! Use AddPendingTask with multiple destinations for proper multi-target handling
// This simplified function returns zero impact; real EC requires specialized multi-destination calculation
return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}
case TaskTypeBalance:
// Balance task: moves volume from source to target
// Source loses 1 volume, target gains 1 volume
return StorageSlotChange{VolumeSlots: -1, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 1, ShardSlots: 0}
case TaskTypeVacuum:
// Vacuum task: frees space by removing deleted entries, no slot change
return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}
case TaskTypeReplication:
// Replication task: creates new replica on target
return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 1, ShardSlots: 0}
default:
// Unknown task type, assume minimal impact
glog.Warningf("unhandled task type %s in CalculateTaskStorageImpact, assuming default impact", taskType)
return StorageSlotChange{VolumeSlots: 0, ShardSlots: 0}, StorageSlotChange{VolumeSlots: 1, ShardSlots: 0}
}
}
// CalculateECShardStorageImpact calculates storage impact for EC shards specifically
func CalculateECShardStorageImpact(shardCount int32, expectedShardSize int64) StorageSlotChange {
// EC shards are typically much smaller than full volumes
// Use shard-level tracking for granular capacity planning
return StorageSlotChange{VolumeSlots: 0, ShardSlots: shardCount}
}
// CalculateECShardCleanupImpact calculates storage impact for cleaning up existing EC shards
func CalculateECShardCleanupImpact(originalVolumeSize int64) StorageSlotChange {
// Cleaning up existing EC shards frees shard slots
// Use the actual EC configuration constants for accurate shard count
return StorageSlotChange{VolumeSlots: 0, ShardSlots: -int32(erasure_coding.TotalShardsCount)} // Negative = freed capacity
}
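A short sketch of the slot accounting these helpers produce; the expected signs follow directly from the functions above, while the wrapper itself and its literal sizes are illustrative.

// exampleImpacts shows the expected signs: a balance move frees one volume
// slot at the source and reserves one at the target, while EC work is
// tracked at shard granularity.
func exampleImpacts() {
	srcChange, dstChange := CalculateTaskStorageImpact(TaskTypeBalance, 1024*1024*1024)
	glog.V(2).Infof("balance impact: source=%+v target=%+v", srcChange, dstChange) // -1 / +1 volume slot

	shardPlacement := CalculateECShardStorageImpact(4, 64*1024*1024) // 4 shards on one disk
	cleanup := CalculateECShardCleanupImpact(1024 * 1024 * 1024)     // frees TotalShardsCount shard slots
	glog.V(2).Infof("ec impact: place=%+v cleanup=%+v", shardPlacement, cleanup)
}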

File diff suppressed because it is too large


@ -0,0 +1,121 @@
package topology
import (
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb/master_pb"
)
// TaskSource represents a single source in a multi-source task (for replicated volume cleanup)
type TaskSource struct {
SourceServer string `json:"source_server"`
SourceDisk uint32 `json:"source_disk"`
StorageChange StorageSlotChange `json:"storage_change"` // Storage impact on this source
EstimatedSize int64 `json:"estimated_size"` // Estimated size for this source
}
// TaskDestination represents a single destination in a multi-destination task
type TaskDestination struct {
TargetServer string `json:"target_server"`
TargetDisk uint32 `json:"target_disk"`
StorageChange StorageSlotChange `json:"storage_change"` // Storage impact on this destination
EstimatedSize int64 `json:"estimated_size"` // Estimated size for this destination
}
// taskState represents the current state of tasks affecting the topology (internal)
// Uses unified multi-source/multi-destination design:
// - Single-source tasks (balance, vacuum, replication): 1 source, 1 destination
// - Multi-source EC tasks (replicated volumes): N sources, M destinations
type taskState struct {
VolumeID uint32 `json:"volume_id"`
TaskType TaskType `json:"task_type"`
Status TaskStatus `json:"status"`
StartedAt time.Time `json:"started_at"`
CompletedAt time.Time `json:"completed_at,omitempty"`
EstimatedSize int64 `json:"estimated_size"` // Total estimated size of task
// Unified source and destination arrays (always used)
Sources []TaskSource `json:"sources"` // Source locations (1+ for all task types)
Destinations []TaskDestination `json:"destinations"` // Destination locations (1+ for all task types)
}
// DiskInfo represents a disk with its current state and ongoing tasks (public for external access)
type DiskInfo struct {
NodeID string `json:"node_id"`
DiskID uint32 `json:"disk_id"`
DiskType string `json:"disk_type"`
DataCenter string `json:"data_center"`
Rack string `json:"rack"`
DiskInfo *master_pb.DiskInfo `json:"disk_info"`
LoadCount int `json:"load_count"` // Number of active tasks
}
// activeDisk represents internal disk state (private)
type activeDisk struct {
*DiskInfo
pendingTasks []*taskState
assignedTasks []*taskState
recentTasks []*taskState // Completed in last N seconds
}
// activeNode represents a node with its disks (private)
type activeNode struct {
nodeID string
dataCenter string
rack string
nodeInfo *master_pb.DataNodeInfo
disks map[uint32]*activeDisk // DiskID -> activeDisk
}
// ActiveTopology provides a real-time view of cluster state with task awareness
type ActiveTopology struct {
// Core topology from master
topologyInfo *master_pb.TopologyInfo
lastUpdated time.Time
// Structured topology for easy access (private)
nodes map[string]*activeNode // NodeID -> activeNode
disks map[string]*activeDisk // "NodeID:DiskID" -> activeDisk
// Performance indexes for O(1) lookups (private)
volumeIndex map[uint32][]string // VolumeID -> list of "NodeID:DiskID" where volume replicas exist
ecShardIndex map[uint32][]string // VolumeID -> list of "NodeID:DiskID" where EC shards exist
// Task states affecting the topology (private)
pendingTasks map[string]*taskState
assignedTasks map[string]*taskState
recentTasks map[string]*taskState
// Configuration
recentTaskWindowSeconds int
// Synchronization
mutex sync.RWMutex
}
// DestinationPlan represents a planned destination for a volume/shard operation
type DestinationPlan struct {
TargetNode string `json:"target_node"`
TargetDisk uint32 `json:"target_disk"`
TargetRack string `json:"target_rack"`
TargetDC string `json:"target_dc"`
ExpectedSize uint64 `json:"expected_size"`
PlacementScore float64 `json:"placement_score"`
}
// MultiDestinationPlan represents multiple planned destinations for operations like EC
type MultiDestinationPlan struct {
Plans []*DestinationPlan `json:"plans"`
TotalShards int `json:"total_shards"`
SuccessfulRack int `json:"successful_racks"`
SuccessfulDCs int `json:"successful_dcs"`
}
// VolumeReplica represents a replica location with server and disk information
type VolumeReplica struct {
ServerID string `json:"server_id"`
DiskID uint32 `json:"disk_id"`
DataCenter string `json:"data_center"`
Rack string `json:"rack"`
}


@@ -0,0 +1,259 @@
package topology
import (
"fmt"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
)
// AssignTask moves a task from pending to assigned and reserves capacity
func (at *ActiveTopology) AssignTask(taskID string) error {
at.mutex.Lock()
defer at.mutex.Unlock()
task, exists := at.pendingTasks[taskID]
if !exists {
return fmt.Errorf("pending task %s not found", taskID)
}
// Check if all destination disks have sufficient capacity to reserve
for _, dest := range task.Destinations {
targetKey := fmt.Sprintf("%s:%d", dest.TargetServer, dest.TargetDisk)
if targetDisk, exists := at.disks[targetKey]; exists {
availableCapacity := at.getEffectiveAvailableCapacityUnsafe(targetDisk)
// Check if we have enough total capacity using the improved unified comparison
if !availableCapacity.CanAccommodate(dest.StorageChange) {
return fmt.Errorf("insufficient capacity on target disk %s:%d. Available: %+v, Required: %+v",
dest.TargetServer, dest.TargetDisk, availableCapacity, dest.StorageChange)
}
} else if dest.TargetServer != "" {
// Fail fast if destination disk is not found in topology
return fmt.Errorf("destination disk %s not found in topology", targetKey)
}
}
// Move task to assigned and reserve capacity
delete(at.pendingTasks, taskID)
task.Status = TaskStatusInProgress
at.assignedTasks[taskID] = task
at.reassignTaskStates()
// Log capacity reservation information for all sources and destinations
totalSourceImpact := StorageSlotChange{}
totalDestImpact := StorageSlotChange{}
for _, source := range task.Sources {
totalSourceImpact.AddInPlace(source.StorageChange)
}
for _, dest := range task.Destinations {
totalDestImpact.AddInPlace(dest.StorageChange)
}
glog.V(2).Infof("Task %s assigned and capacity reserved: %d sources (VolumeSlots:%d, ShardSlots:%d), %d destinations (VolumeSlots:%d, ShardSlots:%d)",
taskID, len(task.Sources), totalSourceImpact.VolumeSlots, totalSourceImpact.ShardSlots,
len(task.Destinations), totalDestImpact.VolumeSlots, totalDestImpact.ShardSlots)
return nil
}
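Callers that drive scheduling can treat an assignment failure as transient and leave the task in pendingTasks for a later retry; a minimal sketch (the retry policy and log level are assumptions):
// Sketch: attempt to promote a pending task; on insufficient capacity the
// task simply stays in pendingTasks and can be retried on the next pass.
func tryAssign(at *ActiveTopology, taskID string) bool {
	if err := at.AssignTask(taskID); err != nil {
		glog.V(1).Infof("task %s not assigned yet: %v", taskID, err)
		return false
	}
	return true
}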
// CompleteTask moves a task from assigned to recent and releases reserved capacity
// NOTE: This only releases the reserved capacity. The actual topology update (VolumeCount changes)
// should be handled by the master when it receives the task completion notification.
func (at *ActiveTopology) CompleteTask(taskID string) error {
at.mutex.Lock()
defer at.mutex.Unlock()
task, exists := at.assignedTasks[taskID]
if !exists {
return fmt.Errorf("assigned task %s not found", taskID)
}
// Release reserved capacity by moving task to completed state
delete(at.assignedTasks, taskID)
task.Status = TaskStatusCompleted
task.CompletedAt = time.Now()
at.recentTasks[taskID] = task
at.reassignTaskStates()
// Log capacity release information for all sources and destinations
totalSourceImpact := StorageSlotChange{}
totalDestImpact := StorageSlotChange{}
for _, source := range task.Sources {
totalSourceImpact.AddInPlace(source.StorageChange)
}
for _, dest := range task.Destinations {
totalDestImpact.AddInPlace(dest.StorageChange)
}
glog.V(2).Infof("Task %s completed and capacity released: %d sources (VolumeSlots:%d, ShardSlots:%d), %d destinations (VolumeSlots:%d, ShardSlots:%d)",
taskID, len(task.Sources), totalSourceImpact.VolumeSlots, totalSourceImpact.ShardSlots,
len(task.Destinations), totalDestImpact.VolumeSlots, totalDestImpact.ShardSlots)
// Clean up old recent tasks
at.cleanupRecentTasks()
return nil
}
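Taken together with ApplyActualStorageChange below, the completion path first releases the reservation and only later applies the real VolumeCount delta once the master reports it; a sketch with placeholder arguments:
// Sketch: capacity is released immediately on completion; the VolumeCount
// adjustment is applied later, when the master's topology update arrives.
func onTaskFinished(at *ActiveTopology, taskID, sourceNode string, sourceDisk uint32) error {
	if err := at.CompleteTask(taskID); err != nil {
		return err
	}
	// Driven by the master's updated topology (values assumed):
	at.ApplyActualStorageChange(sourceNode, sourceDisk, -1) // one replica removed
	return nil
}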
// ApplyActualStorageChange updates the topology to reflect actual storage changes after task completion
// This should be called when the master updates the topology with new VolumeCount information
func (at *ActiveTopology) ApplyActualStorageChange(nodeID string, diskID uint32, volumeCountChange int64) {
at.mutex.Lock()
defer at.mutex.Unlock()
diskKey := fmt.Sprintf("%s:%d", nodeID, diskID)
if disk, exists := at.disks[diskKey]; exists && disk.DiskInfo != nil && disk.DiskInfo.DiskInfo != nil {
oldCount := disk.DiskInfo.DiskInfo.VolumeCount
disk.DiskInfo.DiskInfo.VolumeCount += volumeCountChange
glog.V(2).Infof("Applied actual storage change on disk %s: volume_count %d -> %d (change: %+d)",
diskKey, oldCount, disk.DiskInfo.DiskInfo.VolumeCount, volumeCountChange)
}
}
// AddPendingTask is the unified function that handles both simple and complex task creation
func (at *ActiveTopology) AddPendingTask(spec TaskSpec) error {
// Validation
if len(spec.Sources) == 0 {
return fmt.Errorf("at least one source is required")
}
if len(spec.Destinations) == 0 {
return fmt.Errorf("at least one destination is required")
}
at.mutex.Lock()
defer at.mutex.Unlock()
// Build sources array
sources := make([]TaskSource, len(spec.Sources))
for i, sourceSpec := range spec.Sources {
var storageImpact StorageSlotChange
var estimatedSize int64
if sourceSpec.StorageImpact != nil {
// Use manually specified impact
storageImpact = *sourceSpec.StorageImpact
} else {
// Auto-calculate based on task type and cleanup type
storageImpact = at.calculateSourceStorageImpact(spec.TaskType, sourceSpec.CleanupType, spec.VolumeSize)
}
if sourceSpec.EstimatedSize != nil {
estimatedSize = *sourceSpec.EstimatedSize
} else {
estimatedSize = spec.VolumeSize // Default to volume size
}
sources[i] = TaskSource{
SourceServer: sourceSpec.ServerID,
SourceDisk: sourceSpec.DiskID,
StorageChange: storageImpact,
EstimatedSize: estimatedSize,
}
}
// Build destinations array
destinations := make([]TaskDestination, len(spec.Destinations))
for i, destSpec := range spec.Destinations {
var storageImpact StorageSlotChange
var estimatedSize int64
if destSpec.StorageImpact != nil {
// Use manually specified impact
storageImpact = *destSpec.StorageImpact
} else {
// Auto-calculate based on task type
_, storageImpact = CalculateTaskStorageImpact(spec.TaskType, spec.VolumeSize)
}
if destSpec.EstimatedSize != nil {
estimatedSize = *destSpec.EstimatedSize
} else {
estimatedSize = spec.VolumeSize // Default to volume size
}
destinations[i] = TaskDestination{
TargetServer: destSpec.ServerID,
TargetDisk: destSpec.DiskID,
StorageChange: storageImpact,
EstimatedSize: estimatedSize,
}
}
// Create the task
task := &taskState{
VolumeID: spec.VolumeID,
TaskType: spec.TaskType,
Status: TaskStatusPending,
StartedAt: time.Now(),
EstimatedSize: spec.VolumeSize,
Sources: sources,
Destinations: destinations,
}
at.pendingTasks[spec.TaskID] = task
at.assignTaskToDisk(task)
glog.V(2).Infof("Added pending %s task %s: volume %d, %d sources, %d destinations",
spec.TaskType, spec.TaskID, spec.VolumeID, len(sources), len(destinations))
return nil
}
// calculateSourceStorageImpact calculates storage impact for sources based on task type and cleanup type
func (at *ActiveTopology) calculateSourceStorageImpact(taskType TaskType, cleanupType SourceCleanupType, volumeSize int64) StorageSlotChange {
switch taskType {
case TaskTypeErasureCoding:
switch cleanupType {
case CleanupVolumeReplica:
impact, _ := CalculateTaskStorageImpact(TaskTypeErasureCoding, volumeSize)
return impact
case CleanupECShards:
return CalculateECShardCleanupImpact(volumeSize)
default:
impact, _ := CalculateTaskStorageImpact(TaskTypeErasureCoding, volumeSize)
return impact
}
default:
impact, _ := CalculateTaskStorageImpact(taskType, volumeSize)
return impact
}
}
// SourceCleanupType indicates what type of data needs to be cleaned up from a source
type SourceCleanupType int
const (
CleanupVolumeReplica SourceCleanupType = iota // Clean up volume replica (frees volume slots)
CleanupECShards // Clean up existing EC shards (frees shard slots)
)
// TaskSourceSpec represents a source specification for task creation
type TaskSourceSpec struct {
ServerID string
DiskID uint32
DataCenter string // Data center of the source server
Rack string // Rack of the source server
CleanupType SourceCleanupType // For EC: volume replica vs existing shards
StorageImpact *StorageSlotChange // Optional: manual override
EstimatedSize *int64 // Optional: manual override
}
// TaskDestinationSpec represents a destination specification for task creation
type TaskDestinationSpec struct {
ServerID string
DiskID uint32
StorageImpact *StorageSlotChange // Optional: manual override
EstimatedSize *int64 // Optional: manual override
}
// TaskSpec represents a complete task specification
type TaskSpec struct {
TaskID string
TaskType TaskType
VolumeID uint32
VolumeSize int64 // Used for auto-calculation when manual impacts not provided
Sources []TaskSourceSpec // Can be single or multiple
Destinations []TaskDestinationSpec // Can be single or multiple
}
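Tying these spec types back to AddPendingTask and the assignment lifecycle above, a minimal end-to-end sketch for erasure-coding a replicated volume (addresses, disk IDs, sizes, and the task ID are placeholders; the ActiveTopology constructor is not shown in this diff):
// Sketch: two sources with different cleanup semantics, two shard
// destinations, and automatic impact calculation (no manual overrides).
func exampleECTaskLifecycle(at *ActiveTopology) error {
	spec := TaskSpec{
		TaskID:     "ec-vol-42", // placeholder
		TaskType:   TaskTypeErasureCoding,
		VolumeID:   42,
		VolumeSize: 30 * 1024 * 1024 * 1024, // placeholder volume size
		Sources: []TaskSourceSpec{
			{ServerID: "10.0.0.1:8080", DiskID: 0, CleanupType: CleanupVolumeReplica},
			{ServerID: "10.0.0.2:8080", DiskID: 1, CleanupType: CleanupECShards},
		},
		Destinations: []TaskDestinationSpec{
			{ServerID: "10.0.0.3:8080", DiskID: 0},
			{ServerID: "10.0.0.4:8080", DiskID: 1},
		},
	}
	if err := at.AddPendingTask(spec); err != nil {
		return err
	}
	if err := at.AssignTask(spec.TaskID); err != nil {
		return err // e.g. insufficient capacity; the task stays pending
	}
	return at.CompleteTask(spec.TaskID)
}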

Some files were not shown because too many files have changed in this diff.