Mirror of https://github.com/chrislusf/seaweedfs, synced 2025-06-29 08:12:47 +02:00

Compare commits: 3.88 ... master (107 commits)

Author SHA1 Message Date
chrislu
3023a6f3a4 update doc 2025-06-28 20:27:26 -07:00
chrislu
a788d9ab53 remove unnecessary code 2025-06-28 20:26:46 -07:00
chrislu
3d519fa2a6 only leader master should send telemetry 2025-06-28 20:06:48 -07:00
chrislu
1733d0ce68 remove features and deployments fields 2025-06-28 20:03:06 -07:00
chrislu
166e36bcd3 use telemetry.seaweedfs.com 2025-06-28 19:48:03 -07:00
chrislu
adc7807451 update deploying 2025-06-28 14:59:55 -07:00
chrislu
52097a1d9b fix binary location 2025-06-28 14:43:12 -07:00
chrislu
4cd6c3ec36 copy telemetry server 2025-06-28 14:21:04 -07:00
Chris Lu
a1aab8a083
add telemetry (#6926)
* add telemetry

* fix go mod

* add default telemetry server url

* Update README.md

* replace with broker count instead of s3 count

* Update telemetry.pb.go

* github action to deploy
2025-06-28 14:11:55 -07:00
dependabot[bot]
29892c43ff
chore(deps): bump github.com/go-viper/mapstructure/v2 from 2.2.1 to 2.3.0 (#6925) 2025-06-27 10:37:38 -07:00
chrislu
5e79436498 reference seaweedfs.com 2025-06-26 11:09:17 -07:00
chalet
877b9b788a
update s3 session cache key (#6923) 2025-06-26 03:21:35 -07:00
chrislu
ab49540d2b use master.toml value if not empty
fix https://github.com/seaweedfs/seaweedfs/issues/6922
2025-06-25 17:54:56 -07:00
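The fix above is a precedence rule: a value from master.toml should win over the built-in default only when it is actually set. A minimal sketch of that rule (names illustrative, not the actual SeaweedFS code):

```go
package main

import "fmt"

// pickString returns the master.toml value when it is non-empty,
// otherwise the built-in default (illustrative, not the actual code).
func pickString(tomlValue, defaultValue string) string {
	if tomlValue != "" {
		return tomlValue
	}
	return defaultValue
}

func main() {
	fmt.Println(pickString("", "localhost:9333"))              // empty toml value falls back to the default
	fmt.Println(pickString("master1:9333", "localhost:9333")) // non-empty toml value wins
}
```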
Chris Lu
95261a712e
Improve lock ring (#6921)
* fix flaky lock ring test

* add more tests
2025-06-24 23:04:39 -07:00
Aleksey Kosov
4511c2cc1f
Changes logging function (#6919)
* updated logging methods for stores

* updated logging methods for stores

* updated logging methods for filer

* updated logging methods for uploader and http_util

* updated logging methods for weed server

---------

Co-authored-by: akosov <a.kosov@kryptonite.ru>
2025-06-24 08:44:06 -07:00
dependabot[bot]
2cdd8092cc
chore(deps): bump github.com/go-sql-driver/mysql from 1.9.2 to 1.9.3 (#6916)
Bumps [github.com/go-sql-driver/mysql](https://github.com/go-sql-driver/mysql) from 1.9.2 to 1.9.3.
- [Release notes](https://github.com/go-sql-driver/mysql/releases)
- [Changelog](https://github.com/go-sql-driver/mysql/blob/v1.9.3/CHANGELOG.md)
- [Commits](https://github.com/go-sql-driver/mysql/compare/v1.9.2...v1.9.3)

---
updated-dependencies:
- dependency-name: github.com/go-sql-driver/mysql
  dependency-version: 1.9.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 14:12:02 -07:00
dependabot[bot]
e222883dd0
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3 from 3.110.1 to 3.111.0 (#6918)
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3

---
updated-dependencies:
- dependency-name: github.com/ydb-platform/ydb-go-sdk/v3
  dependency-version: 3.111.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 14:11:52 -07:00
dependabot[bot]
3b6155f4ee
chore(deps): bump cloud.google.com/go/storage from 1.54.0 to 1.55.0 (#6914)
Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.54.0 to 1.55.0.
- [Release notes](https://github.com/googleapis/google-cloud-go/releases)
- [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.54.0...spanner/v1.55.0)

---
updated-dependencies:
- dependency-name: cloud.google.com/go/storage
  dependency-version: 1.55.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 13:16:13 -07:00
dependabot[bot]
29fa698414
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.80.1 to 1.81.0 (#6912)
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3

Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.80.1 to 1.81.0.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.80.1...service/s3/v1.81.0)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-version: 1.81.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 12:01:33 -07:00
dependabot[bot]
f68f55c7e9
chore(deps): bump github.com/rclone/rclone from 1.69.3 to 1.70.1 (#6909)
Bumps [github.com/rclone/rclone](https://github.com/rclone/rclone) from 1.69.3 to 1.70.1.
- [Release notes](https://github.com/rclone/rclone/releases)
- [Changelog](https://github.com/rclone/rclone/blob/master/RELEASE.md)
- [Commits](https://github.com/rclone/rclone/compare/v1.69.3...v1.70.1)

---
updated-dependencies:
- dependency-name: github.com/rclone/rclone
  dependency-version: 1.70.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 10:53:57 -07:00
dependabot[bot]
a5bb5e04da
chore(deps): bump docker/setup-buildx-action from 3.11.0 to 3.11.1 (#6910)
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.11.0 to 3.11.1.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](18ce135bb5...e468171a9d)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-version: 3.11.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 10:48:17 -07:00
dependabot[bot]
7f1f826468
chore(deps): bump github.com/minio/crc64nvme from 1.0.1 to 1.0.2 (#6908)
Bumps [github.com/minio/crc64nvme](https://github.com/minio/crc64nvme) from 1.0.1 to 1.0.2.
- [Release notes](https://github.com/minio/crc64nvme/releases)
- [Commits](https://github.com/minio/crc64nvme/compare/v1.0.1...v1.0.2)

---
updated-dependencies:
- dependency-name: github.com/minio/crc64nvme
  dependency-version: 1.0.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 10:44:01 -07:00
dependabot[bot]
47b9db0215
chore(deps): bump golang.org/x/image from 0.27.0 to 0.28.0 (#6906)
Bumps [golang.org/x/image](https://github.com/golang/image) from 0.27.0 to 0.28.0.
- [Commits](https://github.com/golang/image/compare/v0.27.0...v0.28.0)

---
updated-dependencies:
- dependency-name: golang.org/x/image
  dependency-version: 0.28.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 10:26:20 -07:00
dependabot[bot]
ba8d261591
chore(deps): bump google.golang.org/api from 0.234.0 to 0.238.0 (#6907)
Bumps [google.golang.org/api](https://github.com/googleapis/google-api-go-client) from 0.234.0 to 0.238.0.
- [Release notes](https://github.com/googleapis/google-api-go-client/releases)
- [Changelog](https://github.com/googleapis/google-api-go-client/blob/main/CHANGES.md)
- [Commits](https://github.com/googleapis/google-api-go-client/compare/v0.234.0...v0.238.0)

---
updated-dependencies:
- dependency-name: google.golang.org/api
  dependency-version: 0.238.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 10:26:12 -07:00
dependabot[bot]
5f1d2a9745
chore(deps): bump github.com/parquet-go/parquet-go from 0.24.0 to 0.25.1 (#6851)
* chore(deps): bump github.com/parquet-go/parquet-go from 0.24.0 to 0.25.1

Bumps [github.com/parquet-go/parquet-go](https://github.com/parquet-go/parquet-go) from 0.24.0 to 0.25.1.
- [Release notes](https://github.com/parquet-go/parquet-go/releases)
- [Changelog](https://github.com/parquet-go/parquet-go/blob/main/CHANGELOG.md)
- [Commits](https://github.com/parquet-go/parquet-go/compare/v0.24.0...v0.25.1)

---
updated-dependencies:
- dependency-name: github.com/parquet-go/parquet-go
  dependency-version: 0.25.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* adjust to updated API

  - Fixed Reader Construction: updated to use parquet.OpenFile() instead of passing io.Reader directly to NewReader()
  - Fixed EOF Handling: changed the order of operations to process rows before checking for EOF
  - Added Zero Row Count Check: added explicit check for rowCount == 0 as an additional termination condition

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
Co-authored-by: chrislu <chris.lu@gmail.com>
2025-06-23 10:25:51 -07:00
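Beyond the version bump, this commit documents an API migration in parquet-go's reader. A minimal sketch of the pattern described above, assuming a placeholder RowType and file name (the generic reader shown here is one way to get batched reads; the actual SeaweedFS call sites may differ):

```go
package main

import (
	"errors"
	"io"
	"log"
	"os"

	"github.com/parquet-go/parquet-go"
)

type RowType struct { // placeholder schema
	Key   string `parquet:"key"`
	Value int64  `parquet:"value"`
}

func main() {
	f, err := os.Open("data.parquet") // placeholder file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	stat, _ := f.Stat()

	// Reader construction: open the file explicitly instead of
	// handing an io.Reader straight to NewReader().
	pf, err := parquet.OpenFile(f, stat.Size())
	if err != nil {
		log.Fatal(err)
	}

	reader := parquet.NewGenericReader[RowType](pf)
	defer reader.Close()

	rows := make([]RowType, 128)
	for {
		n, err := reader.Read(rows)
		// EOF handling: process the rows that were returned
		// before checking for io.EOF ...
		for _, row := range rows[:n] {
			_ = row // process row
		}
		// ... and treat a zero row count as a terminal condition too.
		if n == 0 || errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
	}
}
```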
dependabot[bot]
b27ba8e984
chore(deps): bump github.com/aws/aws-sdk-go-v2/config from 1.29.14 to 1.29.17 (#6905)
chore(deps): bump github.com/aws/aws-sdk-go-v2/config

Bumps [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) from 1.29.14 to 1.29.17.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.29.14...config/v1.29.17)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/config
  dependency-version: 1.29.17
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-23 09:40:44 -07:00
chrislu
7324cb7171 3.92 2025-06-22 21:17:06 -07:00
Yixing Cheng
5a7d226d93
chore: keep master statefulSet chart up-to-date (#6903)
This patch adds some missing master options to the Helm chart for the master StatefulSet.
2025-06-20 17:30:17 -07:00
Chris Lu
2b3385e201
Helm Charts: add ip bind for filer (#6902)
add ip bind for filer

fix https://github.com/seaweedfs/seaweedfs/issues/6900
2025-06-20 10:46:57 -07:00
dependabot[bot]
828228dbb0
chore(deps): bump github.com/go-chi/chi/v5 from 5.1.0 to 5.2.2 (#6901)
Bumps [github.com/go-chi/chi/v5](https://github.com/go-chi/chi) from 5.1.0 to 5.2.2.
- [Release notes](https://github.com/go-chi/chi/releases)
- [Changelog](https://github.com/go-chi/chi/blob/master/CHANGELOG.md)
- [Commits](https://github.com/go-chi/chi/compare/v5.1.0...v5.2.2)

---
updated-dependencies:
- dependency-name: github.com/go-chi/chi/v5
  dependency-version: 5.2.2
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-20 10:00:14 -07:00
Aleksey Kosov
90c128e7a6
Context-based logging with request ID (#6899) 2025-06-20 06:23:53 -07:00
Chris Lu
a72c442945
Fix chunked data reading if IAM not enabled (#6898)
* fix chunked data reading if IAM not enabled

* add unit test
2025-06-19 22:58:10 -07:00
Chris Lu
f52134f9a1
adding metricsIp in Helm chart (#6897) 2025-06-19 22:52:19 -07:00
SmoothDenis
abd5102819
ydb filer improvements (#6890)
* fix(ydb): table creation with params

* fix(ydb): migrate to new query client & prevent creation table-bucket on get request

* fix(ydb): use new query client with kv req

* fix(ydb): use directory in every query

* fix(ydb): del unused import

* fix(ydb): tests & default const usage
2025-06-19 14:16:01 -07:00
chrislu
da728750be follow grow volume option version 2025-06-19 13:54:54 -07:00
chrislu
2f1b3d68d7 pass volume version when creating a volume 2025-06-19 01:15:25 -07:00
chrislu
87927d068b display volume version when listing 2025-06-19 00:33:01 -07:00
chrislu
9a115068af adding cassandra2
fix https://github.com/seaweedfs/seaweedfs/issues/6888
2025-06-17 11:25:37 -07:00
chrislu
748bf5e4d3 add default value when reading needle version 2025-06-16 23:35:03 -07:00
chrislu
c602f53a6e tail-volume-uses-the-source-volume-version 2025-06-16 22:46:13 -07:00
chrislu
d2be5822a1 refactoring 2025-06-16 22:25:22 -07:00
chrislu
96632a34b1 add version to volume proto 2025-06-16 22:05:06 -07:00
dependabot[bot]
11f37cd9f2
chore(deps): bump github.com/rdleal/intervalst from 1.4.1 to 1.5.0 (#6882)
Bumps [github.com/rdleal/intervalst](https://github.com/rdleal/intervalst) from 1.4.1 to 1.5.0.
- [Commits](https://github.com/rdleal/intervalst/compare/v1.4.1...v1.5.0)

---
updated-dependencies:
- dependency-name: github.com/rdleal/intervalst
  dependency-version: 1.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 14:23:12 -07:00
dependabot[bot]
34c6249886
chore(deps): bump github.com/hanwen/go-fuse/v2 from 2.7.3-0.20250605191109-50f6569d1a7d to 2.8.0 (#6880)
chore(deps): bump github.com/hanwen/go-fuse/v2

Bumps [github.com/hanwen/go-fuse/v2](https://github.com/hanwen/go-fuse) from 2.7.3-0.20250605191109-50f6569d1a7d to 2.8.0.
- [Commits](https://github.com/hanwen/go-fuse/commits/v2.8.0)

---
updated-dependencies:
- dependency-name: github.com/hanwen/go-fuse/v2
  dependency-version: 2.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 14:22:27 -07:00
Chris Lu
5d8a391b95
filer store: fix nil for mongodb (#6886)
fix https://github.com/seaweedfs/seaweedfs/issues/6885
2025-06-16 14:21:05 -07:00
dependabot[bot]
06a3140142
chore(deps): bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.68 to 1.17.69 (#6883) 2025-06-16 12:51:21 -07:00
Alexey
29d1701c34
Fix URL scheme used for forwarded requests with changed proto (#6884) 2025-06-16 12:50:09 -07:00
chrislu
78069605a6 sort lifecycles 2025-06-16 11:48:29 -07:00
dependabot[bot]
549fb110d7
chore(deps): bump golang.org/x/tools from 0.33.0 to 0.34.0 (#6878)
Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.33.0 to 0.34.0.
- [Release notes](https://github.com/golang/tools/releases)
- [Commits](https://github.com/golang/tools/compare/v0.33.0...v0.34.0)

---
updated-dependencies:
- dependency-name: golang.org/x/tools
  dependency-version: 0.34.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 11:03:20 -07:00
chrislu
f0e987dc9d ensure consistent testing 2025-06-16 10:56:39 -07:00
dependabot[bot]
f598d8e84c
chore(deps): bump github.com/getsentry/sentry-go from 0.31.1 to 0.33.0 (#6879)
Bumps [github.com/getsentry/sentry-go](https://github.com/getsentry/sentry-go) from 0.31.1 to 0.33.0.
- [Release notes](https://github.com/getsentry/sentry-go/releases)
- [Changelog](https://github.com/getsentry/sentry-go/blob/master/CHANGELOG.md)
- [Commits](https://github.com/getsentry/sentry-go/compare/v0.31.1...v0.33.0)

---
updated-dependencies:
- dependency-name: github.com/getsentry/sentry-go
  dependency-version: 0.33.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 10:47:24 -07:00
dependabot[bot]
d6de561650
chore(deps): bump golang.org/x/net from 0.40.0 to 0.41.0 (#6877)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.40.0 to 0.41.0.
- [Commits](https://github.com/golang/net/compare/v0.40.0...v0.41.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-version: 0.41.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 10:45:40 -07:00
dependabot[bot]
0cd3483158
chore(deps): bump google.golang.org/grpc from 1.72.1 to 1.73.0 (#6876)
Bumps [google.golang.org/grpc](https://github.com/grpc/grpc-go) from 1.72.1 to 1.73.0.
- [Release notes](https://github.com/grpc/grpc-go/releases)
- [Commits](https://github.com/grpc/grpc-go/compare/v1.72.1...v1.73.0)

---
updated-dependencies:
- dependency-name: google.golang.org/grpc
  dependency-version: 1.73.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 10:23:41 -07:00
dependabot[bot]
db36e89e7b
chore(deps): bump go.mongodb.org/mongo-driver from 1.17.3 to 1.17.4 (#6875)
Bumps [go.mongodb.org/mongo-driver](https://github.com/mongodb/mongo-go-driver) from 1.17.3 to 1.17.4.
- [Release notes](https://github.com/mongodb/mongo-go-driver/releases)
- [Commits](https://github.com/mongodb/mongo-go-driver/compare/v1.17.3...v1.17.4)

---
updated-dependencies:
- dependency-name: go.mongodb.org/mongo-driver
  dependency-version: 1.17.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 10:23:34 -07:00
dependabot[bot]
d8e8e11519
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3 from 3.108.3 to 3.110.1 (#6874)
chore(deps): bump github.com/ydb-platform/ydb-go-sdk/v3

Bumps [github.com/ydb-platform/ydb-go-sdk/v3](https://github.com/ydb-platform/ydb-go-sdk) from 3.108.3 to 3.110.1.
- [Release notes](https://github.com/ydb-platform/ydb-go-sdk/releases)
- [Changelog](https://github.com/ydb-platform/ydb-go-sdk/blob/master/CHANGELOG.md)
- [Commits](https://github.com/ydb-platform/ydb-go-sdk/compare/v3.108.3...v3.110.1)

---
updated-dependencies:
- dependency-name: github.com/ydb-platform/ydb-go-sdk/v3
  dependency-version: 3.110.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 10:23:22 -07:00
dependabot[bot]
13103c32d8
chore(deps): bump docker/setup-buildx-action from 3.10.0 to 3.11.0 (#6873)
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.10.0 to 3.11.0.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](b5ca514318...18ce135bb5)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-version: 3.11.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-16 10:23:13 -07:00
Erik Jacobson
77397be070
allow distro mount command to use disableXAttr (#6872) 2025-06-16 08:45:05 -07:00
Konstantin Lebedev
958d88cb85
[shell] volume copy add param noLock (#6871) 2025-06-16 07:39:19 -07:00
chrislu
c26299b05a 3.91 2025-06-15 20:42:16 -07:00
NyaMisty
f894e7b7a5
Support filtering source disk type in volume.tier.upload (#6868) 2025-06-15 20:30:04 -07:00
NyaMisty
53e5c84523
Fix wrong error handling in volume.tier.upload when stream == nil but copyErr != nil (#6867) 2025-06-15 20:28:40 -07:00
NyaMisty
cdc543aa9e
Correctly sort in volume.list to ensure output consistency (#6866) 2025-06-15 20:27:48 -07:00
Gerry Hernandez
e653de54b4
FUSE Mount: Fix buffer allocation during copy (#6863)
Fix buffer allocation during FUSE copy
2025-06-13 12:27:39 -07:00
chrislu
c79e73aa2a mount: complete fix for FreeBSD
fix https://github.com/seaweedfs/seaweedfs/issues/6645
2025-06-12 08:19:47 -07:00
chrislu
e71d681fee refactor 2025-06-11 20:46:13 -07:00
chrislu
7c4d98446b refactor 2025-06-11 20:13:06 -07:00
chrislu
f27e195354 refactoring 2025-06-11 20:13:06 -07:00
Bruce Zou
fa730abec7
fix rocksdb enumerate (#6858) 2025-06-11 17:55:58 -07:00
dependabot[bot]
f7df4856e5
chore(deps): bump github.com/cloudflare/circl from 1.3.7 to 1.6.1 (#6856)
Bumps [github.com/cloudflare/circl](https://github.com/cloudflare/circl) from 1.3.7 to 1.6.1.
- [Release notes](https://github.com/cloudflare/circl/releases)
- [Commits](https://github.com/cloudflare/circl/compare/v1.3.7...v1.6.1)

---
updated-dependencies:
- dependency-name: github.com/cloudflare/circl
  dependency-version: 1.6.1
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-10 19:19:12 -07:00
dependabot[bot]
e9c2d5d4f8
chore(deps): bump github.com/redis/go-redis/v9 from 9.8.0 to 9.10.0 (#6850)
Bumps [github.com/redis/go-redis/v9](https://github.com/redis/go-redis) from 9.8.0 to 9.10.0.
- [Release notes](https://github.com/redis/go-redis/releases)
- [Changelog](https://github.com/redis/go-redis/blob/master/CHANGELOG.md)
- [Commits](https://github.com/redis/go-redis/compare/v9.8.0...v9.10.0)

---
updated-dependencies:
- dependency-name: github.com/redis/go-redis/v9
  dependency-version: 9.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-09 10:38:36 -07:00
dependabot[bot]
6e5e6df326
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.80.0 to 1.80.1 (#6852)
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3

Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.80.0 to 1.80.1.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.80.0...service/s3/v1.80.1)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-version: 1.80.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-09 10:38:22 -07:00
dependabot[bot]
f3b1748eb7
chore(deps): bump go.etcd.io/etcd/client/v3 from 3.6.0 to 3.6.1 (#6855)
Bumps [go.etcd.io/etcd/client/v3](https://github.com/etcd-io/etcd) from 3.6.0 to 3.6.1.
- [Release notes](https://github.com/etcd-io/etcd/releases)
- [Commits](https://github.com/etcd-io/etcd/compare/v3.6.0...v3.6.1)

---
updated-dependencies:
- dependency-name: go.etcd.io/etcd/client/v3
  dependency-version: 3.6.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-06-09 10:37:57 -07:00
dependabot[bot]
dcabb71a28
chore(deps): bump github.com/aws/aws-sdk-go-v2/credentials from 1.17.67 to 1.17.68 (#6853) 2025-06-09 09:17:09 -07:00
dependabot[bot]
1023971dbc
chore(deps): bump modernc.org/sqlite from 1.37.1 to 1.38.0 (#6854) 2025-06-09 09:16:49 -07:00
chrislu
33ecc8442e refactor 2025-06-08 22:11:09 -07:00
chrislu
81aeec74a4 3.90 2025-06-08 20:59:43 -07:00
chrislu
60f11f6510 add a readme file for volume needle data layout 2025-06-07 15:52:51 -07:00
chrislu
61c4f01e05 refactor 2025-06-06 08:55:32 -07:00
chrislu
ce51d60127 include new changes related to FreeBSD
50f6569d1a
2025-06-06 08:48:17 -07:00
chrislu
a489d99333 fix tests 2025-06-06 08:25:04 -07:00
Lisandro Pin
00c621abb8
Fix dumb typo in 08556257 (#6844) 2025-06-06 05:59:11 -07:00
SmoothDenis
c99530ec66
fix(filer): add missing return after KvDelete in KvPut with empty value (#6846) 2025-06-06 05:58:03 -07:00
chrislu
7439af0eca refactoring 2025-06-06 01:35:48 -07:00
chrislu
cc135c63f7 a bit refactoring 2025-06-06 01:26:54 -07:00
chrislu
c4695fc3b3 refactor needle write for different versions 2025-06-06 00:35:13 -07:00
Lisandro Pin
bed0a64693
New needle_map.CompactMap() implementation for reduced memory usage (#6842)
* Rework `needle_map.CompactMap()` to maximize memory efficiency.

* Use a memory-efficient structure for `CompactMap` needle value entries.

This slightly complicates the code, but makes a **massive** difference
in memory efficiency - preliminary results show a ~30% reduction in
heap usage, with no measurable performance impact otherwise.

* Clean up type for `CompactMap` chunk IDs.

* Add a small comment description for `CompactMap()`.

* Add the old version of `CompactMap()` for comparison purposes.
2025-06-05 14:03:29 -07:00
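The commit text gives the motivation but not the layout. Purely as an illustration of the general technique, not SeaweedFS's actual CompactMap types: packing fixed-width, pointer-free entries into sorted slices avoids the per-entry bucket and header overhead of a Go map, which is where savings of this kind typically come from.

```go
package main

import (
	"fmt"
	"sort"
)

// entry is a hypothetical fixed-width needle record: no pointers, so the
// whole table is a handful of large allocations instead of millions of
// small map buckets and headers.
type entry struct {
	key    uint64 // needle id
	offset uint32 // offset in the volume file
	size   uint32 // needle size
}

// compactMap keeps entries sorted by key and binary-searches them.
type compactMap struct {
	entries []entry
}

func (m *compactMap) get(key uint64) (entry, bool) {
	i := sort.Search(len(m.entries), func(i int) bool { return m.entries[i].key >= key })
	if i < len(m.entries) && m.entries[i].key == key {
		return m.entries[i], true
	}
	return entry{}, false
}

func main() {
	m := &compactMap{entries: []entry{{key: 7, offset: 0, size: 42}, {key: 42, offset: 8, size: 10}}}
	e, ok := m.get(42)
	fmt.Println(e.size, ok) // 10 true
}
```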
chrislu
d8ddc22fc2 update to latest hanwen/go-fuse/v2
https://github.com/seaweedfs/seaweedfs/issues/6645#issuecomment-2933832235

fix https://github.com/seaweedfs/seaweedfs/issues/6645
2025-06-05 09:20:14 -07:00
chrislu
35f0daa198 the isFsync parameter is essentially IsAsyncWrite and it needs to be turned off if s.isStopping
d8c574a5ef (r159132764)
2025-06-05 00:19:10 -07:00
chrislu
2f3de5e199 fix build 2025-06-03 22:50:45 -07:00
chrislu
bd4891a117 change version directory 2025-06-03 22:46:10 -07:00
chrislu
7039d5003c gorocksdb 1.10.1 ~ rocksdb 10.2.1 2025-06-03 22:46:10 -07:00
Chris Lu
7151a54b28 Merge branch 'master' of https://github.com/seaweedfs/seaweedfs 2025-06-02 23:57:54 -07:00
Chris Lu
b25561d0d7 3.89 2025-06-02 23:56:58 -07:00
dependabot[bot]
9994617dad
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3 from 1.78.2 to 1.80.0 (#6830)
chore(deps): bump github.com/aws/aws-sdk-go-v2/service/s3

Bumps [github.com/aws/aws-sdk-go-v2/service/s3](https://github.com/aws/aws-sdk-go-v2) from 1.78.2 to 1.80.0.
- [Release notes](https://github.com/aws/aws-sdk-go-v2/releases)
- [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/main/changelog-template.json)
- [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.78.2...service/s3/v1.80.0)

---
updated-dependencies:
- dependency-name: github.com/aws/aws-sdk-go-v2/service/s3
  dependency-version: 1.80.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
2025-06-02 23:55:52 -07:00
dependabot[bot]
7265a9c3b7
chore(deps): bump modernc.org/sqlite from 1.37.0 to 1.37.1 (#6831)
Bumps [modernc.org/sqlite](https://gitlab.com/cznic/sqlite) from 1.37.0 to 1.37.1.
- [Commits](https://gitlab.com/cznic/sqlite/compare/v1.37.0...v1.37.1)

---
updated-dependencies:
- dependency-name: modernc.org/sqlite
  dependency-version: 1.37.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
2025-06-02 23:54:18 -07:00
dependabot[bot]
4217f0dcaf
chore(deps): bump github.com/prometheus/client_golang from 1.21.1 to 1.22.0 (#6832)
chore(deps): bump github.com/prometheus/client_golang

Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.21.1 to 1.22.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.21.1...v1.22.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-version: 1.22.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
2025-06-02 23:52:23 -07:00
dependabot[bot]
aeb3bcdfc1
chore(deps): bump docker/build-push-action from 6.17.0 to 6.18.0 (#6834)
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 6.17.0 to 6.18.0.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](1dc7386353...263435318d)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-version: 6.18.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
2025-06-02 23:52:11 -07:00
dependabot[bot]
2c494751e5
chore(deps): bump github.com/fluent/fluent-logger-golang from 1.9.0 to 1.10.0 (#6833)
chore(deps): bump github.com/fluent/fluent-logger-golang

Bumps [github.com/fluent/fluent-logger-golang](https://github.com/fluent/fluent-logger-golang) from 1.9.0 to 1.10.0.
- [Changelog](https://github.com/fluent/fluent-logger-golang/blob/master/CHANGELOG.md)
- [Commits](https://github.com/fluent/fluent-logger-golang/compare/v1.9.0...v1.10.0)

---
updated-dependencies:
- dependency-name: github.com/fluent/fluent-logger-golang
  dependency-version: 1.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Chris Lu <chrislusf@users.noreply.github.com>
2025-06-02 23:52:07 -07:00
Chris Lu
d40746f34e
fix insert beyond look back window (#6838) 2025-06-02 23:43:01 -07:00
Lisandro Pin
7204731749
Minor fix for the CompactMap() performance test. (#6836)
Per-entry memory usage is based on `TotalAllocs`, which is incorrect - that
value is a cumulative measure of heap usage, which doesn't decrease when
objects are freed.

`Allocs` is instead an accurate representation of actual memory usage
at the time metrics are reported.
2025-06-02 17:09:01 -07:00
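The distinction the commit draws matches Go's runtime counters. A sketch, assuming the test reads runtime.MemStats (the commit's `TotalAllocs`/`Allocs` naming suggests `MemStats.TotalAlloc` and `MemStats.Alloc`):

```go
package main

import (
	"fmt"
	"runtime"
)

// liveHeapDelta reports how much live heap a build step added.
// MemStats.Alloc is bytes of heap objects that are still live;
// MemStats.TotalAlloc is cumulative bytes ever allocated and never
// decreases when objects are freed, so per-entry numbers derived
// from it overstate real memory usage.
func liveHeapDelta(build func()) uint64 {
	var before, after runtime.MemStats
	runtime.GC()
	runtime.ReadMemStats(&before)
	build()
	runtime.GC()
	runtime.ReadMemStats(&after)
	return after.Alloc - before.Alloc
}

func main() {
	const n = 1 << 20
	var table []uint64
	bytes := liveHeapDelta(func() { table = make([]uint64, n) })
	fmt.Printf("~%d bytes live per entry\n", bytes/n)
	_ = table // keep the table reachable so GC does not collect it
}
```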
Luna Yao
5e354a18a3
Update filer upsert sqlstring for postgresql (#6835) 2025-06-02 10:46:50 -07:00
Chris Lu
90802cb201
revert part of d8c574a5ef (#6829) 2025-06-01 12:27:49 -07:00
FQHSLycopene
ee0c14673d
Fix TTL Behavior for Directories in Path-Specific Configuration (#6827) 2025-05-29 02:38:12 -07:00
Lisandro Pin
9ffc8bcb54
Further improve memory usage of needle_map.CompactMap(). (#6825) 2025-05-28 11:42:00 -07:00
Aleksey Kosov
283d9e0079
Add context with request (#6824) 2025-05-28 11:34:02 -07:00
Bruce Zou
62aaaa18f3
unlimit the list entries in completeMultipartUpload (#6822) 2025-05-27 03:41:27 -07:00
royatwp
2786bea839
Unable to upload empty files to seaweed via multipart when length is unknown (#6729) 2025-05-26 23:25:01 -07:00
256 changed files with 12757 additions and 13017 deletions


@@ -53,7 +53,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed-large-disk
@@ -68,7 +68,7 @@ jobs:
 release_tag: dev
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed-normal-disk
@@ -102,7 +102,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed-large-disk
@@ -117,7 +117,7 @@ jobs:
 release_tag: dev
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed-normal-disk
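This hunk and the near-identical ones in the following workflow files all make the same one-line change: the build-time commit hash is now injected into the weed/util/version package instead of weed/util. The mechanism is Go's standard -X link-time variable injection; a minimal sketch of the receiving side (the package path and COMMIT variable come from the flag itself; the helper function is illustrative):

```go
// weed/util/version/version.go (sketch)
package version

// COMMIT is empty by default and is overwritten at link time, e.g.:
//   go build -ldflags "-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)"
var COMMIT string

// VersionString is an illustrative helper that folds the commit in.
func VersionString(base string) string {
	if COMMIT == "" {
		return base
	}
	return base + " " + COMMIT
}
```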


@@ -38,7 +38,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 # build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed
@@ -52,7 +52,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed


@@ -38,7 +38,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 # build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed
@@ -52,7 +52,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed


@@ -38,7 +38,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 # build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed
@@ -52,7 +52,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed


@@ -38,7 +38,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 # build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed
@@ -52,7 +52,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed


@@ -39,7 +39,7 @@ jobs:
 build_flags: -tags elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 # build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed
@@ -53,7 +53,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset,elastic,gocdk,rclone,sqlite,tarantool,tikv,ydb
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed


@@ -38,7 +38,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 # build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed
@@ -52,7 +52,7 @@ jobs:
 overwrite: true
 pre_command: export CGO_ENABLED=0 && export GODEBUG=http2client=0
 build_flags: -tags 5BytesOffset # optional, default is
-ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=${{github.sha}}
+ldflags: -s -w -extldflags -static -X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=${{github.sha}}
 # Where to run `go build .`
 project_path: weed
 binary_name: weed


@@ -36,7 +36,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 with:
 buildkitd-flags: "--debug"
 -
@@ -56,7 +56,7 @@ jobs:
 password: ${{ secrets.GHCR_TOKEN }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}


@@ -37,7 +37,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 with:
 buildkitd-flags: "--debug"
 -
@@ -57,7 +57,7 @@ jobs:
 password: ${{ secrets.GHCR_TOKEN }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}


@@ -37,7 +37,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 -
 name: Login to Docker Hub
 if: github.event_name != 'pull_request'
@@ -47,7 +47,7 @@ jobs:
 password: ${{ secrets.DOCKER_PASSWORD }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}


@@ -38,7 +38,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 -
 name: Login to Docker Hub
 if: github.event_name != 'pull_request'
@@ -48,7 +48,7 @@ jobs:
 password: ${{ secrets.DOCKER_PASSWORD }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}


@@ -38,7 +38,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 -
 name: Login to Docker Hub
 if: github.event_name != 'pull_request'
@@ -48,7 +48,7 @@ jobs:
 password: ${{ secrets.DOCKER_PASSWORD }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}


@@ -37,7 +37,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 -
 name: Login to Docker Hub
 if: github.event_name != 'pull_request'
@@ -47,7 +47,7 @@ jobs:
 password: ${{ secrets.DOCKER_PASSWORD }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}


@@ -37,7 +37,7 @@ jobs:
 uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
 -
 name: Set up Docker Buildx
-uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
 -
 name: Login to Docker Hub
 if: github.event_name != 'pull_request'
@@ -47,7 +47,7 @@ jobs:
 password: ${{ secrets.DOCKER_PASSWORD }}
 -
 name: Build
-uses: docker/build-push-action@1dc73863535b631f98b2378be8619f83b136f4a0 # v2
+uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v2
 with:
 context: ./docker
 push: ${{ github.event_name != 'pull_request' }}

.github/workflows/deploy_telemetry.yml (new file, 171 lines)

@@ -0,0 +1,171 @@
# This workflow will build and deploy the SeaweedFS telemetry server
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
name: Deploy Telemetry Server
on:
workflow_dispatch:
inputs:
setup:
description: 'Run first-time server setup'
required: true
type: boolean
default: false
deploy:
description: 'Deploy telemetry server to remote server'
required: true
type: boolean
default: false
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: '1.24'
- name: Build Telemetry Server
if: github.event_name == 'workflow_dispatch' && inputs.deploy
run: |
go mod tidy
echo "Building telemetry server..."
GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go
ls -la telemetry-server
echo "Build completed successfully"
- name: First-time Server Setup
if: github.event_name == 'workflow_dispatch' && inputs.setup
env:
SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
run: |
mkdir -p ~/.ssh
echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
chmod 600 ~/.ssh/deploy_key
echo "Host *" > ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
# Create all required directories with proper permissions
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \
chmod 755 ~/seaweedfs-telemetry/logs && \
chmod 755 ~/seaweedfs-telemetry/data && \
touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \
chmod 644 ~/seaweedfs-telemetry/logs/*.log"
# Create systemd service file
echo "
[Unit]
Description=SeaweedFS Telemetry Server
After=network.target
[Service]
Type=simple
User=$REMOTE_USER
WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry
ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353
Restart=always
RestartSec=5
StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log
StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log
[Install]
WantedBy=multi-user.target" > telemetry.service
# Setup logrotate configuration
echo "# SeaweedFS Telemetry service log rotation
/home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log {
daily
rotate 30
compress
delaycompress
missingok
notifempty
create 644 $REMOTE_USER $REMOTE_USER
postrotate
systemctl restart telemetry.service
endscript
}" > telemetry_logrotate
# Copy configuration files
scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
# Copy and install service and logrotate files
scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \
sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \
sudo systemctl daemon-reload && \
sudo systemctl enable telemetry.service"
echo "✅ First-time setup completed successfully!"
echo "📋 Next step: Run the deployment to install the telemetry server binary"
echo " 1. Go to GitHub Actions → Deploy Telemetry Server"
echo " 2. Click 'Run workflow'"
echo " 3. Check 'Deploy telemetry server to remote server'"
echo " 4. Click 'Run workflow'"
rm -f ~/.ssh/deploy_key
- name: Deploy Telemetry Server to Remote Server
if: github.event_name == 'workflow_dispatch' && inputs.deploy
env:
SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
run: |
mkdir -p ~/.ssh
echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
chmod 600 ~/.ssh/deploy_key
echo "Host *" > ~/.ssh/config
echo " StrictHostKeyChecking no" >> ~/.ssh/config
# Create temp directory and copy binary
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp"
scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/
# Copy updated configuration files
scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
# Check if service exists and deploy accordingly
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then
echo 'Service exists, performing update...'
sudo systemctl stop telemetry.service
mkdir -p ~/seaweedfs-telemetry/bin
mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/
chmod +x ~/seaweedfs-telemetry/bin/telemetry-server
sudo systemctl start telemetry.service
sudo systemctl status telemetry.service
else
echo 'ERROR: telemetry.service not found!'
echo 'Please run the first-time setup before deploying.'
echo 'Go to GitHub Actions → Deploy Telemetry Server → Run workflow → Check \"Run first-time server setup\"'
exit 1
fi"
# Verify deployment
ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
echo 'Waiting for service to start...'
sleep 5
curl -f http://localhost:8353/health || echo 'Health check failed'"
rm -f ~/.ssh/deploy_key
- name: Notify Deployment Status
if: always()
run: |
if [ "${{ job.status }}" == "success" ]; then
echo "✅ Telemetry server deployment successful"
echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353"
echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics"
else
echo "❌ Telemetry server deployment failed"
fi
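The workflow's final verification step is a curl against the health endpoint. The same probe as a Go client, for completeness (the host is a placeholder; port 8353 and the /health path are taken from the workflow above):

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Port 8353 and /health come from the deploy workflow; replace the host.
	resp, err := http.Get("http://telemetry.example.com:8353/health")
	if err != nil {
		fmt.Println("health check failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("telemetry server:", resp.Status)
}
```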


@@ -73,6 +73,7 @@ Table of Contents
 * [Installation Guide](#installation-guide)
 * [Disk Related Topics](#disk-related-topics)
 * [Benchmark](#benchmark)
+* [Enterprise](#enterprise)
 * [License](#license)
 # Quick Start #
@@ -651,6 +652,13 @@ Total Errors:0.
 [Back to TOC](#table-of-contents)
+## Enterprise ##
+For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
+which has a self-healing storage format with better data protection.
+[Back to TOC](#table-of-contents)
 ## License ##
 Licensed under the Apache License, Version 2.0 (the "License");


@@ -6,7 +6,7 @@ ARG BRANCH=${BRANCH:-master}
 ARG TAGS
 RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
 RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
-&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
+&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
 && CGO_ENABLED=0 go install -tags "$TAGS" -ldflags "-extldflags -static ${LDFLAGS}"
 FROM alpine AS final


@@ -3,7 +3,7 @@ FROM golang:1.24 as builder
 RUN apt-get update
 RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
-ENV ROCKSDB_VERSION v9.10.0
+ENV ROCKSDB_VERSION v10.2.1
 # build RocksDB
 RUN cd /tmp && \


@@ -3,7 +3,7 @@ FROM golang:1.24 as builder
 RUN apt-get update
 RUN apt-get install -y build-essential libsnappy-dev zlib1g-dev libbz2-dev libgflags-dev liblz4-dev libzstd-dev
-ENV ROCKSDB_VERSION v9.10.0
+ENV ROCKSDB_VERSION v10.2.1
 # build RocksDB
 RUN cd /tmp && \
@@ -21,7 +21,7 @@ RUN git clone https://github.com/seaweedfs/seaweedfs /go/src/github.com/seaweedf
 ARG BRANCH=${BRANCH:-master}
 RUN cd /go/src/github.com/seaweedfs/seaweedfs && git checkout $BRANCH
 RUN cd /go/src/github.com/seaweedfs/seaweedfs/weed \
-&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
+&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
 && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


@@ -5,7 +5,7 @@ RUN mkdir -p /go/src/github.com/seaweedfs/
 ADD . /go/src/github.com/seaweedfs/seaweedfs
 RUN ls -al /go/src/github.com/seaweedfs/ && \
 cd /go/src/github.com/seaweedfs/seaweedfs/weed \
-&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(git rev-parse --short HEAD)" \
+&& export LDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(git rev-parse --short HEAD)" \
 && go install -tags "5BytesOffset rocksdb" -ldflags "-extldflags -static ${LDFLAGS}"


@@ -7,7 +7,7 @@ gen: dev
 cgo ?= 0
 binary:
 export SWCOMMIT=$(shell git rev-parse --short HEAD)
-export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util.COMMIT=$(SWCOMMIT)"
+export SWLDFLAGS="-X github.com/seaweedfs/seaweedfs/weed/util/version.COMMIT=$(SWCOMMIT)"
 cd ../weed && CGO_ENABLED=$(cgo) GOOS=linux go build $(options) -tags "$(tags)" -ldflags "-s -w -extldflags -static $(SWLDFLAGS)" && mv weed ../docker/
 cd ../other/mq_client_example/agent_pub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_pub_record ../../../docker/
 cd ../other/mq_client_example/agent_sub_record && CGO_ENABLED=$(cgo) GOOS=linux go build && mv agent_sub_record ../../../docker/

go.mod (246 changed lines)

@@ -5,9 +5,9 @@ go 1.24
 toolchain go1.24.1
 require (
-cloud.google.com/go v0.121.0 // indirect
+cloud.google.com/go v0.121.1 // indirect
 cloud.google.com/go/pubsub v1.49.0
-cloud.google.com/go/storage v1.54.0
+cloud.google.com/go/storage v1.55.0
 github.com/Azure/azure-pipeline-go v0.2.3
 github.com/Azure/azure-storage-blob-go v0.15.0
 github.com/Shopify/sarama v1.38.1
@@ -31,7 +31,7 @@ require (
 github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
 github.com/fsnotify/fsnotify v1.8.0 // indirect
 github.com/go-redsync/redsync/v4 v4.13.0
-github.com/go-sql-driver/mysql v1.9.2
+github.com/go-sql-driver/mysql v1.9.3
 github.com/go-zookeeper/zk v1.0.3 // indirect
 github.com/gocql/gocql v1.7.0
 github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
@@ -56,7 +56,7 @@ require (
 github.com/klauspost/reedsolomon v1.12.4
 github.com/kurin/blazer v0.5.3
 github.com/lib/pq v1.10.9
-github.com/linxGnu/grocksdb v1.9.9
+github.com/linxGnu/grocksdb v1.10.1
 github.com/mailru/easyjson v0.7.7 // indirect
 github.com/mattn/go-ieproxy v0.0.11 // indirect
 github.com/mattn/go-isatty v0.0.20 // indirect
@@ -68,9 +68,9 @@ require (
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
 github.com/posener/complete v1.2.3
 github.com/pquerna/cachecontrol v0.2.0
-github.com/prometheus/client_golang v1.21.1
-github.com/prometheus/client_model v0.6.1 // indirect
-github.com/prometheus/common v0.62.0 // indirect
+github.com/prometheus/client_golang v1.22.0
+github.com/prometheus/client_model v0.6.2 // indirect
+github.com/prometheus/common v0.64.0 // indirect
 github.com/prometheus/procfs v0.16.1
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
 github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
@@ -94,30 +94,30 @@ require (
 github.com/xdg-go/scram v1.1.2 // indirect
 github.com/xdg-go/stringprep v1.0.4 // indirect
 github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
-go.etcd.io/etcd/client/v3 v3.6.0
-go.mongodb.org/mongo-driver v1.17.3
+go.etcd.io/etcd/client/v3 v3.6.1
+go.mongodb.org/mongo-driver v1.17.4
 go.opencensus.io v0.24.0 // indirect
 gocloud.dev v0.41.0
 gocloud.dev/pubsub/natspubsub v0.41.0
 gocloud.dev/pubsub/rabbitpubsub v0.41.0
-golang.org/x/crypto v0.38.0
-golang.org/x/exp v0.0.0-20250305212735-054e65f0b394
-golang.org/x/image v0.27.0
-golang.org/x/net v0.40.0
+golang.org/x/crypto v0.39.0
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
+golang.org/x/image v0.28.0
+golang.org/x/net v0.41.0
 golang.org/x/oauth2 v0.30.0 // indirect
 golang.org/x/sys v0.33.0
-golang.org/x/text v0.25.0 // indirect
-golang.org/x/tools v0.33.0
+golang.org/x/text v0.26.0 // indirect
+golang.org/x/tools v0.34.0
 golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
-google.golang.org/api v0.234.0
+google.golang.org/api v0.238.0
 google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
-google.golang.org/grpc v1.72.1
+google.golang.org/grpc v1.73.0
 google.golang.org/protobuf v1.36.6
 gopkg.in/inf.v0 v0.9.1 // indirect
 modernc.org/b v1.0.0 // indirect
 modernc.org/mathutil v1.7.1
-modernc.org/memory v1.9.1 // indirect
-modernc.org/sqlite v1.37.0
+modernc.org/memory v1.11.0 // indirect
+modernc.org/sqlite v1.38.0
 modernc.org/strutil v1.2.1
 )
@@ -125,126 +125,133 @@ require (
 github.com/Jille/raft-grpc-transport v1.6.1
 github.com/arangodb/go-driver v1.6.6
 github.com/armon/go-metrics v0.4.1
-github.com/aws/aws-sdk-go-v2 v1.36.3
-github.com/aws/aws-sdk-go-v2/config v1.29.14
-github.com/aws/aws-sdk-go-v2/credentials v1.17.67
-github.com/aws/aws-sdk-go-v2/service/s3 v1.78.2
+github.com/aws/aws-sdk-go-v2 v1.36.5
+github.com/aws/aws-sdk-go-v2/config v1.29.17
+github.com/aws/aws-sdk-go-v2/credentials v1.17.70
+github.com/aws/aws-sdk-go-v2/service/s3 v1.81.0
 github.com/cognusion/imaging v1.0.2
-github.com/fluent/fluent-logger-golang v1.9.0
-github.com/getsentry/sentry-go v0.31.1
+github.com/fluent/fluent-logger-golang v1.10.0
+github.com/getsentry/sentry-go v0.33.0
 github.com/golang-jwt/jwt/v5 v5.2.2
 github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
-github.com/hanwen/go-fuse/v2 v2.7.2
+github.com/hanwen/go-fuse/v2 v2.8.0
 github.com/hashicorp/raft v1.7.3
 github.com/hashicorp/raft-boltdb/v2 v2.3.1
-github.com/minio/crc64nvme v1.0.1
+github.com/minio/crc64nvme v1.0.2
 github.com/orcaman/concurrent-map/v2 v2.0.1
-github.com/parquet-go/parquet-go v0.24.0
-github.com/pkg/sftp v1.13.7
+github.com/parquet-go/parquet-go v0.25.1
+github.com/pkg/sftp v1.13.9
 github.com/rabbitmq/amqp091-go v1.10.0
-github.com/rclone/rclone v1.69.3
-github.com/rdleal/intervalst v1.4.1
-github.com/redis/go-redis/v9 v9.8.0
+github.com/rclone/rclone v1.70.1
+github.com/rdleal/intervalst v1.5.0
+github.com/redis/go-redis/v9 v9.10.0
 github.com/schollz/progressbar/v3 v3.18.0
 github.com/shirou/gopsutil/v3 v3.24.5
 github.com/tarantool/go-tarantool/v2 v2.3.2
 github.com/tikv/client-go/v2 v2.0.7
 github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
-github.com/ydb-platform/ydb-go-sdk/v3 v3.108.3
-go.etcd.io/etcd/client/pkg/v3 v3.6.0
+github.com/ydb-platform/ydb-go-sdk/v3 v3.111.0
+go.etcd.io/etcd/client/pkg/v3 v3.6.1
 go.uber.org/atomic v1.11.0
-golang.org/x/sync v0.14.0
+golang.org/x/sync v0.15.0
 google.golang.org/grpc/security/advancedtls v1.0.0
 )
 require (
-cel.dev/expr v0.22.1 // indirect
-cloud.google.com/go/auth v0.16.1 // indirect
+cel.dev/expr v0.23.0 // indirect
+cloud.google.com/go/auth v0.16.2 // indirect
 cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
 cloud.google.com/go/compute/metadata v0.7.0 // indirect
 cloud.google.com/go/iam v1.5.2 // indirect
 cloud.google.com/go/monitoring v1.24.2 // indirect
 filippo.io/edwards25519 v1.1.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
-github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
+github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.1 // indirect
 github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
-github.com/Files-com/files-sdk-go/v3 v3.2.97 // indirect
+github.com/Files-com/files-sdk-go/v3 v3.2.173 // indirect
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
 github.com/IBM/go-sdk-core/v5 v5.20.0 // indirect
 github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
 github.com/Microsoft/go-winio v0.6.2 // indirect
 github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
 github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
-github.com/ProtonMail/go-crypto v1.1.3 // indirect
+github.com/ProtonMail/go-crypto v1.3.0 // indirect
 github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
 github.com/ProtonMail/go-srp v0.0.7 // indirect
github.com/ProtonMail/gopenpgp/v2 v2.7.4 // indirect
github.com/PuerkitoBio/goquery v1.8.1 // indirect
github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
github.com/PuerkitoBio/goquery v1.10.3 // indirect
github.com/abbot/go-http-auth v0.4.0 // indirect
github.com/andybalholm/brotli v1.1.0 // indirect
github.com/andybalholm/cascadia v1.3.2 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.77 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
github.com/aws/aws-sdk-go-v2/service/sns v1.34.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
github.com/aws/smithy-go v1.22.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
github.com/aws/smithy-go v1.22.4 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/bradenaw/juniper v0.15.2 // indirect
github.com/bradenaw/juniper v0.15.3 // indirect
github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
github.com/buengese/sgzip v0.1.1 // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
github.com/cloudflare/circl v1.3.7 // indirect
github.com/cloudinary/cloudinary-go/v2 v2.9.0 // indirect
github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudinary/cloudinary-go/v2 v2.10.0 // indirect
github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect
github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
github.com/creasty/defaults v1.7.0 // indirect
github.com/creasty/defaults v1.8.0 // indirect
github.com/cronokirby/saferith v0.33.0 // indirect
github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
github.com/d4l3k/messagediff v1.2.1 // indirect
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect
github.com/ebitengine/purego v0.8.3 // indirect
github.com/ebitengine/purego v0.8.4 // indirect
github.com/elastic/gosigar v0.14.2 // indirect
github.com/emersion/go-message v0.18.0 // indirect
github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
github.com/emersion/go-message v0.18.2 // indirect
github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/fatih/color v1.16.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/flynn/noise v1.0.1 // indirect
github.com/gabriel-vasile/mimetype v1.4.7 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
github.com/go-chi/chi/v5 v5.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.9 // indirect
github.com/geoffgarside/ber v1.2.0 // indirect
github.com/go-chi/chi/v5 v5.2.2 // indirect
github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect
github.com/go-jose/go-jose/v4 v4.0.5 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
github.com/go-resty/resty/v2 v2.11.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/gofrs/flock v0.8.1 // indirect
github.com/go-openapi/errors v0.22.1 // indirect
github.com/go-openapi/strfmt v0.23.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/go-resty/resty/v2 v2.16.5 // indirect
github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/google/s2a-go v0.1.9 // indirect
@ -270,35 +277,37 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect
github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lanrat/extsort v1.0.2 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lpar/date v1.0.0 // indirect
github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/montanaflynn/stats v0.7.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/nats.go v1.40.1 // indirect
github.com/nats-io/nkeys v0.4.10 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/ncw/swift/v2 v2.0.3 // indirect
github.com/ncw/swift/v2 v2.0.4 // indirect
github.com/nxadm/tail v1.4.11 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo/v2 v2.19.0 // indirect
github.com/onsi/gomega v1.34.1 // indirect
github.com/oklog/ulid v1.3.1 // indirect
github.com/onsi/ginkgo/v2 v2.23.3 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/oracle/oci-go-sdk/v65 v65.80.0 // indirect
github.com/panjf2000/ants/v2 v2.9.1 // indirect
github.com/oracle/oci-go-sdk/v65 v65.93.0 // indirect
github.com/panjf2000/ants/v2 v2.11.3 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pierrec/lz4/v4 v4.1.21 // indirect
github.com/pingcap/errors v0.11.5-0.20211224045212-9687c2b0f87c // indirect
github.com/pingcap/failpoint v0.0.0-20220801062533-2eaa32854a6c // indirect
@ -307,31 +316,31 @@ require (
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
github.com/pkg/xattr v0.4.10 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
github.com/relvacode/iso8601 v1.3.0 // indirect
github.com/relvacode/iso8601 v1.6.0 // indirect
github.com/rfjakob/eme v1.1.2 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/samber/lo v1.47.0 // indirect
github.com/shirou/gopsutil/v4 v4.24.12 // indirect
github.com/samber/lo v1.50.0 // indirect
github.com/shirou/gopsutil/v4 v4.25.5 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/smartystreets/goconvey v1.8.1 // indirect
github.com/sony/gobreaker v0.5.0 // indirect
github.com/sony/gobreaker v1.0.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002 // indirect
github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect
github.com/tarantool/go-iproto v1.1.0 // indirect
github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect
github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 // indirect
github.com/tinylib/msgp v1.1.8 // indirect
github.com/tklauser/go-sysconf v0.3.13 // indirect
github.com/tklauser/numcpus v0.7.0 // indirect
github.com/tinylib/msgp v1.3.0 // indirect
github.com/tklauser/go-sysconf v0.3.15 // indirect
github.com/tklauser/numcpus v0.10.0 // indirect
github.com/twmb/murmur3 v1.1.3 // indirect
github.com/unknwon/goconfig v1.0.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect
@ -343,36 +352,37 @@ require (
github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
github.com/zeebo/blake3 v0.2.4 // indirect
github.com/zeebo/errs v1.4.0 // indirect
go.etcd.io/bbolt v1.3.10 // indirect
go.etcd.io/etcd/api/v3 v3.6.0 // indirect
go.etcd.io/bbolt v1.4.0 // indirect
go.etcd.io/etcd/api/v3 v3.6.1 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/sdk v1.36.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/term v0.32.0 // indirect
golang.org/x/time v0.11.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect
golang.org/x/time v0.12.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/validator.v2 v2.0.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
modernc.org/libc v1.62.1 // indirect
modernc.org/libc v1.65.10 // indirect
moul.io/http2curl/v2 v2.3.0 // indirect
storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect
storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 // indirect
storj.io/eventkit v0.0.0-20240415002644-1d9596fee086 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
storj.io/common v0.0.0-20250605163628-70ca83b6228e // indirect
storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
storj.io/infectious v0.0.2 // indirect
storj.io/picobuf v0.0.3 // indirect
storj.io/picobuf v0.0.4 // indirect
storj.io/uplink v1.13.1 // indirect
)

go.sum

File diff suppressed because it is too large


@ -1,6 +1,6 @@
apiVersion: v1
description: SeaweedFS
name: seaweedfs
appVersion: "3.88"
appVersion: "3.92"
# Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
version: 4.0.388
version: 4.0.392


@ -144,3 +144,8 @@ stringData:
# this key must be an inline json config file
seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"snu8yoP6QAlY0ne4","secretKey":"PNzBcmeLNEdR0oviwm04NQAicOrDH1Km"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"SCigFee6c5lbi04A","secretKey":"kgFhbT38R8WUYVtiFQ1OiSVOrYr3NKku"}],"actions":["Read"]}]}'
```
## Enterprise
For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
which has a self-healing storage format with better data protection.


@ -162,6 +162,9 @@ spec:
{{- if .Values.filer.metricsPort }}
-metricsPort={{ .Values.filer.metricsPort }} \
{{- end }}
{{- if .Values.filer.metricsIp }}
-metricsIp={{ .Values.filer.metricsIp }} \
{{- end }}
{{- if .Values.filer.redirectOnRead }}
-redirectOnRead \
{{- end }}
@ -187,6 +190,7 @@ spec:
-encryptVolumeData \
{{- end }}
-ip=${POD_IP} \
-ip.bind={{ .Values.filer.ipBind }} \
{{- if .Values.filer.filerGroup}}
-filerGroup={{ .Values.filer.filerGroup}} \
{{- end }}
@ -219,7 +223,10 @@ spec:
-s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \
{{- end }}
{{- end }}
-master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
-master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
{{- range .Values.filer.extraArgs }}
{{ . }} \
{{- end }}
volumeMounts:
{{- if (or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir")) }}
- name: seaweedfs-filer-log-volume


@ -157,18 +157,36 @@ spec:
{{- if .Values.master.metricsPort }}
-metricsPort={{ .Values.master.metricsPort }} \
{{- end }}
{{- if .Values.master.metricsIp }}
-metricsIp={{ .Values.master.metricsIp }} \
{{- end }}
-volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
{{- if .Values.master.disableHttp }}
-disableHttp \
{{- end }}
{{- if .Values.master.pulseSeconds }}
-pulseSeconds={{ .Values.master.pulseSeconds }} \
{{- if .Values.master.resumeState }}
-resumeState \
{{- end }}
{{- if .Values.master.raftHashicorp }}
-raftHashicorp \
{{- end }}
{{- if .Values.master.raftBootstrap }}
-raftBootstrap \
{{- end }}
{{- if .Values.master.electionTimeout }}
-electionTimeout={{ .Values.master.electionTimeout }} \
{{- end }}
{{- if .Values.master.heartbeatInterval }}
-heartbeatInterval={{ .Values.master.heartbeatInterval }} \
{{- end }}
{{- if .Values.master.garbageThreshold }}
-garbageThreshold={{ .Values.master.garbageThreshold }} \
{{- end }}
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master.{{ .Release.Namespace }} \
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
-peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} \
{{- range .Values.master.extraArgs }}
{{ . }} \
{{- end }}
volumeMounts:
- name : data-{{ .Release.Namespace }}
mountPath: /data


@ -150,6 +150,9 @@ spec:
{{- if .Values.volume.metricsPort }}
-metricsPort={{ .Values.volume.metricsPort }} \
{{- end }}
{{- if .Values.volume.metricsIp }}
-metricsIp={{ .Values.volume.metricsIp }} \
{{- end }}
-dir {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}/{{$dir.name}}{{end}} \
{{- if .Values.volume.idx }}
-dir.idx=/idx \
@ -183,7 +186,10 @@ spec:
-minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
-ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume.{{ .Release.Namespace }} \
-compactionMBps={{ .Values.volume.compactionMBps }} \
-mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
-mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
{{- range .Values.volume.extraArgs }}
{{ . }} \
{{- end }}
volumeMounts:
{{- range $dir := .Values.volume.dataDirs }}
{{- if not ( eq $dir.type "custom" ) }}


@ -56,12 +56,11 @@ master:
port: 9333
grpcPort: 19333
metricsPort: 9327
metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
ipBind: "0.0.0.0"
volumePreallocate: false
volumeSizeLimitMB: 1000
loggingOverrideLevel: null
# number of seconds between heartbeats, default 5
pulseSeconds: null
# threshold to vacuum and reclaim spaces, default 0.3 (30%)
garbageThreshold: null
# Prometheus push interval in seconds, default 15
@ -75,6 +74,25 @@ master:
# Disable http request, only gRpc operations are allowed
disableHttp: false
# Resume previous state on start master server
resumeState: false
# Use Hashicorp Raft
raftHashicorp: false
# Whether to bootstrap the Raft cluster. Only use it when using Hashicorp Raft
raftBootstrap: false
# election timeout of master servers
electionTimeout: "10s"
# heartbeat interval of master servers; randomly multiplied by a factor in [1, 1.25)
heartbeatInterval: "300ms"
# Custom command line arguments to add to the master command
# Example to fix IPv6 metrics connectivity issues:
# extraArgs: ["-metricsIp", "0.0.0.0"]
# Example with multiple args:
# extraArgs: ["-customFlag", "value", "-anotherFlag"]
extraArgs: []
config: |-
# Enter any extra configuration for master.toml here.
# It may be a multi-line string.
@ -277,6 +295,7 @@ volume:
port: 8080
grpcPort: 18080
metricsPort: 9327
metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
ipBind: "0.0.0.0"
replicas: 1
loggingOverrideLevel: null
@ -289,6 +308,13 @@ volume:
# minimum free disk space (in percent). If free disk space drops below this value, all volumes are marked as ReadOnly
minFreeSpacePercent: 7
# Custom command line arguments to add to the volume command
# Example to fix IPv6 metrics connectivity issues:
# extraArgs: ["-metricsIp", "0.0.0.0"]
# Example with multiple args:
# extraArgs: ["-customFlag", "value", "-anotherFlag"]
extraArgs: []
# For each data disk you may use ANY storage-class, example with local-path-provisioner
# Annotations are optional.
# dataDirs:
@ -520,6 +546,8 @@ filer:
port: 8888
grpcPort: 18888
metricsPort: 9327
metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
ipBind: "0.0.0.0" # IP address to bind to. Set to 0.0.0.0 to allow external traffic
loggingOverrideLevel: null
filerGroup: ""
# prefer to read and write to volumes in this data center (not set by default)
@ -547,6 +575,13 @@ filer:
# Disable http request, only gRpc operations are allowed
disableHttp: false
# Custom command line arguments to add to the filer command
# Example to fix IPv6 metrics connectivity issues:
# extraArgs: ["-metricsIp", "0.0.0.0"]
# Example with multiple args:
# extraArgs: ["-customFlag", "value", "-anotherFlag"]
extraArgs: []
# Add a custom notification.toml to configure filer notifications
# Example:
# notificationConfig: |-

telemetry/DEPLOYMENT.md Normal file

@ -0,0 +1,271 @@
# SeaweedFS Telemetry Server Deployment
This document describes how to deploy the SeaweedFS telemetry server to a remote server using GitHub Actions.
## Prerequisites
1. A remote Linux server with:
- SSH access
- systemd (for service management)
- Optional: Prometheus and Grafana (for monitoring)
2. GitHub repository secrets configured (see [Setup GitHub Secrets](#setup-github-secrets) below):
- `TELEMETRY_SSH_PRIVATE_KEY`: SSH private key for accessing the remote server
- `TELEMETRY_HOST`: Remote server hostname or IP address
- `TELEMETRY_USER`: Username for SSH access
## Setup GitHub Secrets
Before using the deployment workflow, you need to configure the required secrets in your GitHub repository.
### Step 1: Generate SSH Key Pair
On your local machine, generate a new SSH key pair specifically for deployment:
```bash
# Generate a new SSH key pair
ssh-keygen -t ed25519 -C "seaweedfs-telemetry-deploy" -f ~/.ssh/seaweedfs_telemetry_deploy
# This creates two files:
# ~/.ssh/seaweedfs_telemetry_deploy (private key)
# ~/.ssh/seaweedfs_telemetry_deploy.pub (public key)
```
### Step 2: Configure Remote Server
Copy the public key to your remote server:
```bash
# Copy public key to remote server
ssh-copy-id -i ~/.ssh/seaweedfs_telemetry_deploy.pub user@your-server.com
# Or manually append to authorized_keys
cat ~/.ssh/seaweedfs_telemetry_deploy.pub | ssh user@your-server.com "mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys"
```
Test the SSH connection:
```bash
# Test SSH connection with the new key
ssh -i ~/.ssh/seaweedfs_telemetry_deploy user@your-server.com "echo 'SSH connection successful'"
```
### Step 3: Add Secrets to GitHub Repository
1. Go to your GitHub repository
2. Click on **Settings** tab
3. In the sidebar, click **Secrets and variables** → **Actions**
4. Click **New repository secret** for each of the following:
#### TELEMETRY_SSH_PRIVATE_KEY
```bash
# Display the private key content
cat ~/.ssh/seaweedfs_telemetry_deploy
```
- **Name**: `TELEMETRY_SSH_PRIVATE_KEY`
- **Value**: Copy the entire private key content, including the `-----BEGIN OPENSSH PRIVATE KEY-----` and `-----END OPENSSH PRIVATE KEY-----` lines
#### TELEMETRY_HOST
- **Name**: `TELEMETRY_HOST`
- **Value**: Your server's hostname or IP address (e.g., `telemetry.example.com` or `192.168.1.100`)
#### TELEMETRY_USER
- **Name**: `TELEMETRY_USER`
- **Value**: The username on the remote server (e.g., `ubuntu`, `deploy`, or your username)
### Step 4: Verify Configuration
Create a simple test workflow or manually trigger the deployment to verify the secrets are working correctly.
### Security Best Practices
1. **Dedicated SSH Key**: Use a separate SSH key only for deployment
2. **Limited Permissions**: Create a dedicated user on the remote server with minimal required permissions
3. **Key Rotation**: Regularly rotate SSH keys
4. **Server Access**: Restrict SSH access to specific IP ranges if possible
### Example Server Setup
If you're setting up a new server, here's a basic configuration:
```bash
# On the remote server, create a dedicated user for deployment
sudo useradd -m -s /bin/bash seaweedfs-deploy
sudo usermod -aG sudo seaweedfs-deploy # Only if sudo access is needed
# Switch to the deployment user
sudo su - seaweedfs-deploy
# Create SSH directory
mkdir -p ~/.ssh
chmod 700 ~/.ssh
# Add your public key (paste the content of seaweedfs_telemetry_deploy.pub)
nano ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
```
### Troubleshooting
#### SSH Connection Issues
```bash
# Test SSH connection manually
ssh -i ~/.ssh/seaweedfs_telemetry_deploy -v user@your-server.com
# Check SSH key permissions
ls -la ~/.ssh/seaweedfs_telemetry_deploy*
# Should show: -rw------- for private key, -rw-r--r-- for public key
```
#### GitHub Actions Fails
1. **Check secrets**: Ensure all three secrets are properly set in GitHub
2. **Verify SSH key**: Make sure the entire private key (including headers/footers) is copied
3. **Test connectivity**: Manually SSH to the server from your local machine
4. **Check user permissions**: Ensure the remote user has necessary permissions
## GitHub Actions Workflow
The deployment workflow (`.github/workflows/deploy_telemetry.yml`) provides two main operations:
### 1. First-time Setup
Run this once to set up the remote server:
1. Go to GitHub Actions in your repository
2. Select "Deploy Telemetry Server" workflow
3. Click "Run workflow"
4. Check "Run first-time server setup"
5. Click "Run workflow"
This will:
- Create necessary directories on the remote server
- Set up systemd service configuration (see the unit sketch below)
- Configure log rotation (see the logrotate sketch under Server Directory Structure)
- Upload Grafana dashboard and Prometheus configuration
- Enable the telemetry service (but not start it yet)
**Note**: The setup only prepares the infrastructure. You need to run a deployment afterward to install and start the telemetry server.
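For orientation, the unit that the setup installs is roughly equivalent to the sketch below. The service name (`telemetry.service`) and binary path match the layout described later in this document; the user name and exact options are placeholders, not the literal file the workflow writes:
```ini
[Unit]
Description=SeaweedFS Telemetry Server
After=network.target

[Service]
Type=simple
# Run as a non-privileged user; adjust to your deployment user.
User=user
ExecStart=/home/user/seaweedfs-telemetry/bin/telemetry-server -port=8353
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
```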
### 2. Deploy Updates
To deploy updates, manually trigger deployment:
1. Go to GitHub Actions in your repository
2. Select "Deploy Telemetry Server" workflow
3. Click "Run workflow"
4. Check "Deploy telemetry server to remote server"
5. Click "Run workflow"
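Both operations are exposed as manual `workflow_dispatch` inputs. A sketch of that trigger surface follows (the input names here are assumptions based on the checkboxes described above; the workflow file itself is the source of truth):
```yaml
on:
  workflow_dispatch:
    inputs:
      setup:
        description: "Run first-time server setup"
        type: boolean
        default: false
      deploy:
        description: "Deploy telemetry server to remote server"
        type: boolean
        default: true
```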
## Server Directory Structure
After setup, the remote server will have:
```
~/seaweedfs-telemetry/
├── bin/
│ └── telemetry-server # Binary executable
├── logs/
│ ├── telemetry.log # Application logs
│ └── telemetry.error.log # Error logs
├── data/ # Data directory (if needed)
├── grafana-dashboard.json # Grafana dashboard configuration
└── prometheus.yml # Prometheus configuration
```
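The setup step also configures log rotation for the files under `logs/`. A minimal logrotate policy in the spirit of what gets installed might look like this (the path, retention, and options are assumptions, not the exact file the workflow writes):
```
/home/user/seaweedfs-telemetry/logs/*.log {
    daily
    rotate 7
    compress
    missingok
    notifempty
    copytruncate
}
```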
## Service Management
The telemetry server runs as a systemd service:
```bash
# Check service status
sudo systemctl status telemetry.service
# View logs
sudo journalctl -u telemetry.service -f
# Restart service
sudo systemctl restart telemetry.service
# Stop/start service
sudo systemctl stop telemetry.service
sudo systemctl start telemetry.service
```
## Accessing the Service
After deployment, the telemetry server will be available at:
- **Dashboard**: `http://your-server:8353`
- **API**: `http://your-server:8353/api/*`
- **Metrics**: `http://your-server:8353/metrics`
- **Health Check**: `http://your-server:8353/health`
## Optional: Prometheus and Grafana Integration
### Prometheus Setup
1. Install Prometheus on your server
2. Update `/etc/prometheus/prometheus.yml` to include:
```yaml
scrape_configs:
- job_name: 'seaweedfs-telemetry'
static_configs:
- targets: ['localhost:8353']
metrics_path: '/metrics'
```
### Grafana Setup
1. Install Grafana on your server
2. Import the dashboard from `~/seaweedfs-telemetry/grafana-dashboard.json`
3. Configure Prometheus as a data source pointing to your Prometheus instance
## Troubleshooting
### Deployment Fails
1. Check GitHub Actions logs for detailed error messages
2. Verify SSH connectivity: `ssh user@host`
3. Ensure all required secrets are configured in GitHub
### Service Won't Start
1. Check service logs: `sudo journalctl -u telemetry.service`
2. Verify binary permissions: `ls -la ~/seaweedfs-telemetry/bin/`
3. Test binary manually: `~/seaweedfs-telemetry/bin/telemetry-server -help`
### Port Conflicts
If port 8353 is already in use:
1. Edit the systemd service: `sudo systemctl edit telemetry.service`
2. Add override configuration:
```ini
[Service]
ExecStart=
ExecStart=/home/user/seaweedfs-telemetry/bin/telemetry-server -port=8354
```
3. Reload and restart: `sudo systemctl daemon-reload && sudo systemctl restart telemetry.service`
## Security Considerations
1. **Firewall**: Consider restricting access to telemetry ports
2. **SSH Keys**: Use dedicated SSH keys with minimal permissions
3. **User Permissions**: Run the service as a non-privileged user
4. **Network**: Consider running on internal networks only
## Monitoring
Monitor the deployment and service health:
- **GitHub Actions**: Check workflow runs for deployment status
- **System Logs**: `sudo journalctl -u telemetry.service`
- **Application Logs**: `tail -f ~/seaweedfs-telemetry/logs/telemetry.log`
- **Health Endpoint**: `curl http://localhost:8353/health`
- **Metrics**: `curl http://localhost:8353/metrics`

telemetry/README.md Normal file

@ -0,0 +1,353 @@
# SeaweedFS Telemetry System
A privacy-respecting telemetry system for SeaweedFS that collects cluster-level usage statistics and provides visualization through Prometheus and Grafana.
## Features
- **Privacy-First Design**: Uses in-memory cluster IDs (regenerated on restart), no personal data collection
- **Prometheus Integration**: Native Prometheus metrics for monitoring and alerting
- **Grafana Dashboards**: Pre-built dashboards for data visualization
- **Protocol Buffers**: Efficient binary data transmission for optimal performance
- **Opt-in Only**: Disabled by default, requires explicit configuration
- **Docker Compose**: Complete monitoring stack deployment
- **Automatic Cleanup**: Configurable data retention policies
## Architecture
```
SeaweedFS Cluster → Telemetry Client → Telemetry Server → Prometheus → Grafana
                                (protobuf)          (metrics)    (queries)
```
## Data Transmission
The telemetry system uses **Protocol Buffers exclusively** for efficient binary data transmission:
- **Compact Format**: 30-50% smaller than JSON
- **Fast Serialization**: Better performance than text-based formats
- **Type Safety**: Strong typing with generated Go structs
- **Schema Evolution**: Built-in versioning support
### Protobuf Schema
```protobuf
message TelemetryData {
string cluster_id = 1; // In-memory generated UUID
string version = 2; // SeaweedFS version
string os = 3; // Operating system
// Field 4 reserved (was features)
// Field 5 reserved (was deployment)
int32 volume_server_count = 6; // Number of volume servers
uint64 total_disk_bytes = 7; // Total disk usage
int32 total_volume_count = 8; // Total volume count
int32 filer_count = 9; // Number of filer servers
int32 broker_count = 10; // Number of broker servers
int64 timestamp = 11; // Collection timestamp
}
```
## Privacy Approach
- **No Personal Data**: No hostnames, IP addresses, or user information
- **In-Memory IDs**: Cluster IDs are generated in-memory and change on restart (see the sketch after this list)
- **Aggregated Data**: Only cluster-level statistics, no individual file/user data
- **Opt-in Only**: Telemetry is disabled by default
- **Transparent**: Open source implementation, clear data collection policy
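A minimal, standard-library-only sketch of how such an ID can be generated once per process and never persisted (illustrative only; not necessarily the exact SeaweedFS implementation):
```go
package main

import (
	"crypto/rand"
	"fmt"
	"sync"
)

var (
	clusterIDOnce sync.Once
	clusterID     string
)

// getClusterID returns a UUIDv4 that lives only in memory, so a
// restarted cluster reports under a fresh identifier.
func getClusterID() string {
	clusterIDOnce.Do(func() {
		b := make([]byte, 16)
		if _, err := rand.Read(b); err != nil {
			panic(err)
		}
		b[6] = (b[6] & 0x0f) | 0x40 // version 4
		b[8] = (b[8] & 0x3f) | 0x80 // RFC 4122 variant
		clusterID = fmt.Sprintf("%x-%x-%x-%x-%x",
			b[0:4], b[4:6], b[6:8], b[8:10], b[10:16])
	})
	return clusterID
}

func main() {
	fmt.Println(getClusterID())
}
```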
## Collected Data
| Field | Description | Example |
|-------|-------------|---------|
| `cluster_id` | In-memory UUID (changes on restart) | `a1b2c3d4-...` |
| `version` | SeaweedFS version | `3.45` |
| `os` | Operating system and architecture | `linux/amd64` |
| `volume_server_count` | Number of volume servers | `5` |
| `total_disk_bytes` | Total disk usage across cluster | `1073741824` |
| `total_volume_count` | Total number of volumes | `120` |
| `filer_count` | Number of filer servers | `2` |
| `broker_count` | Number of broker servers | `1` |
| `timestamp` | When data was collected | `1640995200` |
## Quick Start
### 1. Deploy Telemetry Server
```bash
# Clone and start the complete monitoring stack
git clone https://github.com/seaweedfs/seaweedfs.git
cd seaweedfs/telemetry
docker-compose up -d
# Or run the server directly
cd server
go run . -port=8080 -dashboard=true
```
### 2. Configure SeaweedFS
```bash
# Enable telemetry in SeaweedFS master (uses default telemetry.seaweedfs.com)
weed master -telemetry=true
# Or in server mode
weed server -telemetry=true
# Or specify custom telemetry server
weed master -telemetry=true -telemetry.url=http://localhost:8080/api/collect
```
### 3. Access Dashboards
- **Telemetry Server**: http://localhost:8080
- **Prometheus**: http://localhost:9090
- **Grafana**: http://localhost:3000 (admin/admin)
## Configuration
### SeaweedFS Master/Server
```bash
# Enable telemetry
-telemetry=true
# Set custom telemetry server URL (optional, defaults to telemetry.seaweedfs.com)
-telemetry.url=http://your-telemetry-server:8080/api/collect
```
### Telemetry Server
```bash
# Server configuration
-port=8080 # Server port
-dashboard=true # Enable built-in dashboard
-cleanup=24h # Cleanup interval
-max-age=720h # Maximum data retention (30 days)
# Example
./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h
```
## Prometheus Metrics
The telemetry server exposes these Prometheus metrics:
### Cluster Metrics
- `seaweedfs_telemetry_total_clusters`: Total unique clusters (30 days)
- `seaweedfs_telemetry_active_clusters`: Active clusters (7 days)
### Per-Cluster Metrics
- `seaweedfs_telemetry_volume_servers{cluster_id, version, os}`: Volume servers per cluster
- `seaweedfs_telemetry_disk_bytes{cluster_id, version, os}`: Disk usage per cluster
- `seaweedfs_telemetry_volume_count{cluster_id, version, os}`: Volume count per cluster
- `seaweedfs_telemetry_filer_count{cluster_id, version, os}`: Filer servers per cluster
- `seaweedfs_telemetry_broker_count{cluster_id, version, os}`: Broker servers per cluster
- `seaweedfs_telemetry_cluster_info{cluster_id, version, os}`: Cluster metadata
### Server Metrics
- `seaweedfs_telemetry_reports_received_total`: Total telemetry reports received
## API Endpoints
### Data Collection
```bash
# Submit telemetry data (protobuf only)
POST /api/collect
Content-Type: application/x-protobuf
[TelemetryRequest protobuf data]
```
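For illustration, a minimal Go client that fills in the generated types and POSTs the binary payload to this endpoint could look like the sketch below (the import path for the generated `proto` package is an assumption based on the repository layout, and the field values are placeholders):
```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"runtime"
	"time"

	"google.golang.org/protobuf/proto"

	// Assumed import path; the generated package in telemetry/proto is named `proto`.
	pb "github.com/seaweedfs/seaweedfs/telemetry/proto"
)

func main() {
	req := &pb.TelemetryRequest{Data: &pb.TelemetryData{
		ClusterId:         "a1b2c3d4-example", // an in-memory UUID in a real cluster
		Version:           "3.92",
		Os:                runtime.GOOS + "/" + runtime.GOARCH,
		VolumeServerCount: 5,
		TotalDiskBytes:    1 << 30,
		TotalVolumeCount:  120,
		FilerCount:        2,
		BrokerCount:       1,
		Timestamp:         time.Now().Unix(),
	}}

	payload, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	// The body is the raw TelemetryRequest; the content type must be application/x-protobuf.
	resp, err := http.Post("http://localhost:8080/api/collect",
		"application/x-protobuf", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```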
### Statistics (JSON for dashboard/debugging)
```bash
# Get aggregated statistics
GET /api/stats
# Get recent cluster instances
GET /api/instances?limit=100
# Get metrics over time
GET /api/metrics?days=30
```
### Monitoring
```bash
# Prometheus metrics
GET /metrics
```
## Docker Deployment
### Complete Stack (Recommended)
```yaml
# docker-compose.yml
version: '3.8'
services:
telemetry-server:
build: ./server
ports:
- "8080:8080"
command: ["-port=8080", "-dashboard=true", "-cleanup=24h"]
prometheus:
image: prom/prometheus:latest
ports:
- "9090:9090"
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
volumes:
- ./grafana-provisioning:/etc/grafana/provisioning
- ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs.json
```
```bash
# Deploy the stack
docker-compose up -d
# Scale telemetry server if needed
docker-compose up -d --scale telemetry-server=3
```
### Server Only
```bash
# Build and run telemetry server
cd server
docker build -t seaweedfs-telemetry .
docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true
```
## Development
### Protocol Buffer Development
```bash
# Generate protobuf code
cd telemetry
protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto
# The generated code is already included in the repository
```
### Build from Source
```bash
# Build telemetry server
cd telemetry/server
go build -o telemetry-server .
# Build SeaweedFS with telemetry support
cd ../..
go build -o weed ./weed
```
### Testing
```bash
# Test telemetry server
cd telemetry/server
go test ./...
# Test protobuf communication (requires protobuf tools)
# See telemetry client code for examples
```
## Grafana Dashboard
The included Grafana dashboard provides:
- **Overview**: Total and active clusters, version distribution
- **Resource Usage**: Volume servers and disk usage over time
- **Infrastructure**: Operating system distribution and server counts
- **Growth Trends**: Historical growth patterns
### Custom Queries
```promql
# Total active clusters
seaweedfs_telemetry_active_clusters
# Disk usage by version
sum by (version) (seaweedfs_telemetry_disk_bytes)
# Volume servers by operating system
sum by (os) (seaweedfs_telemetry_volume_servers)
# Filer servers by version
sum by (version) (seaweedfs_telemetry_filer_count)
# Broker servers across all clusters
sum(seaweedfs_telemetry_broker_count)
# Growth rate (weekly)
increase(seaweedfs_telemetry_total_clusters[7d])
```
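These metrics can also feed standard Prometheus alerting rules. For example, a sketch that fires when the server stops receiving reports (rule name and thresholds are arbitrary choices, not part of the shipped configuration):
```yaml
groups:
  - name: seaweedfs-telemetry
    rules:
      - alert: TelemetryReportsStalled
        expr: increase(seaweedfs_telemetry_reports_received_total[1h]) == 0
        for: 2h
        labels:
          severity: warning
        annotations:
          summary: "Telemetry server has not received reports in the last hour"
```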
## Security Considerations
- **Network Security**: Use HTTPS in production environments
- **Access Control**: Implement authentication for Grafana and Prometheus
- **Data Retention**: Configure appropriate retention policies
- **Monitoring**: Monitor the telemetry infrastructure itself
## Troubleshooting
### Common Issues
**SeaweedFS not sending data:**
```bash
# Check telemetry configuration
weed master -h | grep telemetry
# Verify connectivity
curl -v http://your-telemetry-server:8080/api/collect
```
**Server not receiving data:**
```bash
# Check server logs
docker-compose logs telemetry-server
# Verify metrics endpoint
curl http://localhost:8080/metrics
```
**Prometheus not scraping:**
```bash
# Check Prometheus targets
curl http://localhost:9090/api/v1/targets
# Verify configuration
docker-compose logs prometheus
```
### Debugging
```bash
# Enable verbose logging in SeaweedFS
weed master -v=2 -telemetry=true
# Check telemetry server metrics
curl http://localhost:8080/metrics | grep seaweedfs_telemetry
# Test data flow
curl http://localhost:8080/api/stats
```
## Contributing
1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests if applicable
5. Submit a pull request
## License
This telemetry system is part of SeaweedFS and follows the same Apache 2.0 license.


@ -0,0 +1,55 @@
version: '3.8'
services:
telemetry-server:
build: ./server
ports:
- "8080:8080"
command: [
"./telemetry-server",
"-port=8080",
"-dashboard=false", # Disable built-in dashboard, use Grafana
"-log=true",
"-cors=true"
]
networks:
- telemetry
prometheus:
image: prom/prometheus:latest
ports:
- "9090:9090"
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- prometheus_data:/prometheus
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
networks:
- telemetry
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_USERS_ALLOW_SIGN_UP=false
volumes:
- grafana_data:/var/lib/grafana
- ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs-telemetry.json
- ./grafana-provisioning:/etc/grafana/provisioning
networks:
- telemetry
volumes:
prometheus_data:
grafana_data:
networks:
telemetry:
driver: bridge


@ -0,0 +1,734 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": null,
"links": [],
"liveNow": false,
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 1,
"options": {
"showHeader": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "seaweedfs_telemetry_total_clusters",
"format": "time_series",
"refId": "A"
}
],
"title": "Total SeaweedFS Clusters",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "thresholds"
},
"custom": {
"align": "auto",
"cellOptions": {
"type": "auto"
},
"inspect": false
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 2,
"options": {
"showHeader": true
},
"pluginVersion": "10.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "seaweedfs_telemetry_active_clusters",
"format": "time_series",
"refId": "A"
}
],
"title": "Active Clusters (7 days)",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 3,
"options": {
"legend": {
"displayMode": "visible",
"placement": "bottom",
"showLegend": true
},
"pieType": "pie",
"reduceOptions": {
"values": false,
"calcs": [
"lastNotNull"
],
"fields": ""
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "count by (version) (seaweedfs_telemetry_cluster_info)",
"format": "time_series",
"legendFormat": "{{version}}",
"refId": "A"
}
],
"title": "SeaweedFS Version Distribution",
"type": "piechart"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
}
},
"mappings": []
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 4,
"options": {
"legend": {
"displayMode": "visible",
"placement": "bottom",
"showLegend": true
},
"pieType": "pie",
"reduceOptions": {
"values": false,
"calcs": [
"lastNotNull"
],
"fields": ""
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "count by (os) (seaweedfs_telemetry_cluster_info)",
"format": "time_series",
"legendFormat": "{{os}}",
"refId": "A"
}
],
"title": "Operating System Distribution",
"type": "piechart"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 24,
"x": 0,
"y": 16
},
"id": 5,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(seaweedfs_telemetry_volume_servers)",
"format": "time_series",
"legendFormat": "Total Volume Servers",
"refId": "A"
}
],
"title": "Total Volume Servers Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
},
"unit": "bytes"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 24
},
"id": 6,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(seaweedfs_telemetry_disk_bytes)",
"format": "time_series",
"legendFormat": "Total Disk Usage",
"refId": "A"
}
],
"title": "Total Disk Usage Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 24
},
"id": 7,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(seaweedfs_telemetry_volume_count)",
"format": "time_series",
"legendFormat": "Total Volume Count",
"refId": "A"
}
],
"title": "Total Volume Count Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 32
},
"id": 8,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(seaweedfs_telemetry_filer_count)",
"format": "time_series",
"legendFormat": "Total Filer Count",
"refId": "A"
}
],
"title": "Total Filer Servers Over Time",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"vis": false
},
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 32
},
"id": 9,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "${DS_PROMETHEUS}"
},
"expr": "sum(seaweedfs_telemetry_broker_count)",
"format": "time_series",
"legendFormat": "Total Broker Count",
"refId": "A"
}
],
"title": "Total Broker Servers Over Time",
"type": "timeseries"
}
],
"refresh": "5m",
"schemaVersion": 38,
"style": "dark",
"tags": [
"seaweedfs",
"telemetry"
],
"templating": {
"list": []
},
"time": {
"from": "now-24h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "SeaweedFS Telemetry Dashboard",
"uid": "seaweedfs-telemetry",
"version": 1,
"weekStart": ""
}


@ -0,0 +1,12 @@
apiVersion: 1
providers:
- name: 'seaweedfs'
orgId: 1
folder: ''
type: file
disableDeletion: false
updateIntervalSeconds: 10
allowUiUpdates: true
options:
path: /var/lib/grafana/dashboards


@ -0,0 +1,9 @@
apiVersion: 1
datasources:
- name: Prometheus
type: prometheus
access: proxy
url: http://prometheus:9090
isDefault: true
editable: true

telemetry/prometheus.yml Normal file

@ -0,0 +1,15 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
rule_files:
# - "first_rules.yml"
# - "second_rules.yml"
scrape_configs:
- job_name: 'seaweedfs-telemetry'
static_configs:
- targets: ['telemetry-server:8080']
scrape_interval: 30s
metrics_path: '/metrics'
scrape_timeout: 10s


@ -0,0 +1,377 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.2
// protoc v5.29.3
// source: telemetry.proto
package proto
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
// Verify that this generated code is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
// Verify that runtime/protoimpl is sufficiently up-to-date.
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// TelemetryData represents cluster-level telemetry information
type TelemetryData struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
// Unique cluster identifier (generated in-memory)
ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
// SeaweedFS version
Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
// Operating system (e.g., "linux/amd64")
Os string `protobuf:"bytes,3,opt,name=os,proto3" json:"os,omitempty"`
// Number of volume servers in the cluster
VolumeServerCount int32 `protobuf:"varint,6,opt,name=volume_server_count,json=volumeServerCount,proto3" json:"volume_server_count,omitempty"`
// Total disk usage across all volume servers (in bytes)
TotalDiskBytes uint64 `protobuf:"varint,7,opt,name=total_disk_bytes,json=totalDiskBytes,proto3" json:"total_disk_bytes,omitempty"`
// Total number of volumes in the cluster
TotalVolumeCount int32 `protobuf:"varint,8,opt,name=total_volume_count,json=totalVolumeCount,proto3" json:"total_volume_count,omitempty"`
// Number of filer servers in the cluster
FilerCount int32 `protobuf:"varint,9,opt,name=filer_count,json=filerCount,proto3" json:"filer_count,omitempty"`
// Number of broker servers in the cluster
BrokerCount int32 `protobuf:"varint,10,opt,name=broker_count,json=brokerCount,proto3" json:"broker_count,omitempty"`
// Unix timestamp when the data was collected
Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
func (x *TelemetryData) Reset() {
*x = TelemetryData{}
if protoimpl.UnsafeEnabled {
mi := &file_telemetry_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TelemetryData) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TelemetryData) ProtoMessage() {}
func (x *TelemetryData) ProtoReflect() protoreflect.Message {
mi := &file_telemetry_proto_msgTypes[0]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TelemetryData.ProtoReflect.Descriptor instead.
func (*TelemetryData) Descriptor() ([]byte, []int) {
return file_telemetry_proto_rawDescGZIP(), []int{0}
}
func (x *TelemetryData) GetClusterId() string {
if x != nil {
return x.ClusterId
}
return ""
}
func (x *TelemetryData) GetVersion() string {
if x != nil {
return x.Version
}
return ""
}
func (x *TelemetryData) GetOs() string {
if x != nil {
return x.Os
}
return ""
}
func (x *TelemetryData) GetVolumeServerCount() int32 {
if x != nil {
return x.VolumeServerCount
}
return 0
}
func (x *TelemetryData) GetTotalDiskBytes() uint64 {
if x != nil {
return x.TotalDiskBytes
}
return 0
}
func (x *TelemetryData) GetTotalVolumeCount() int32 {
if x != nil {
return x.TotalVolumeCount
}
return 0
}
func (x *TelemetryData) GetFilerCount() int32 {
if x != nil {
return x.FilerCount
}
return 0
}
func (x *TelemetryData) GetBrokerCount() int32 {
if x != nil {
return x.BrokerCount
}
return 0
}
func (x *TelemetryData) GetTimestamp() int64 {
if x != nil {
return x.Timestamp
}
return 0
}
// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server
type TelemetryRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Data *TelemetryData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (x *TelemetryRequest) Reset() {
*x = TelemetryRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_telemetry_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TelemetryRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TelemetryRequest) ProtoMessage() {}
func (x *TelemetryRequest) ProtoReflect() protoreflect.Message {
mi := &file_telemetry_proto_msgTypes[1]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TelemetryRequest.ProtoReflect.Descriptor instead.
func (*TelemetryRequest) Descriptor() ([]byte, []int) {
return file_telemetry_proto_rawDescGZIP(), []int{1}
}
func (x *TelemetryRequest) GetData() *TelemetryData {
if x != nil {
return x.Data
}
return nil
}
// TelemetryResponse is returned by the telemetry server
type TelemetryResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
}
func (x *TelemetryResponse) Reset() {
*x = TelemetryResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_telemetry_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}
func (x *TelemetryResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
func (*TelemetryResponse) ProtoMessage() {}
func (x *TelemetryResponse) ProtoReflect() protoreflect.Message {
mi := &file_telemetry_proto_msgTypes[2]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
}
return ms
}
return mi.MessageOf(x)
}
// Deprecated: Use TelemetryResponse.ProtoReflect.Descriptor instead.
func (*TelemetryResponse) Descriptor() ([]byte, []int) {
return file_telemetry_proto_rawDescGZIP(), []int{2}
}
func (x *TelemetryResponse) GetSuccess() bool {
if x != nil {
return x.Success
}
return false
}
func (x *TelemetryResponse) GetMessage() string {
if x != nil {
return x.Message
}
return ""
}
var File_telemetry_proto protoreflect.FileDescriptor
var file_telemetry_proto_rawDesc = []byte{
0x0a, 0x0f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x12, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xce, 0x02, 0x0a,
0x0d, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d,
0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a,
0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, 0x03, 0x20,
0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06,
0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76,
0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c,
0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x42, 0x79, 0x74, 0x65,
0x73, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x74,
0x6f, 0x74, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09,
0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x12, 0x21, 0x0a, 0x0c, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x40, 0x0a,
0x10, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x12, 0x2c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x18, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x54, 0x65, 0x6c, 0x65,
0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22,
0x47, 0x0a, 0x11, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18,
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68,
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d,
0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
var (
file_telemetry_proto_rawDescOnce sync.Once
file_telemetry_proto_rawDescData = file_telemetry_proto_rawDesc
)
func file_telemetry_proto_rawDescGZIP() []byte {
file_telemetry_proto_rawDescOnce.Do(func() {
file_telemetry_proto_rawDescData = protoimpl.X.CompressGZIP(file_telemetry_proto_rawDescData)
})
return file_telemetry_proto_rawDescData
}
var file_telemetry_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_telemetry_proto_goTypes = []any{
(*TelemetryData)(nil), // 0: telemetry.TelemetryData
(*TelemetryRequest)(nil), // 1: telemetry.TelemetryRequest
(*TelemetryResponse)(nil), // 2: telemetry.TelemetryResponse
}
var file_telemetry_proto_depIdxs = []int32{
0, // 0: telemetry.TelemetryRequest.data:type_name -> telemetry.TelemetryData
1, // [1:1] is the sub-list for method output_type
1, // [1:1] is the sub-list for method input_type
1, // [1:1] is the sub-list for extension type_name
1, // [1:1] is the sub-list for extension extendee
0, // [0:1] is the sub-list for field type_name
}
func init() { file_telemetry_proto_init() }
func file_telemetry_proto_init() {
if File_telemetry_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_telemetry_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*TelemetryData); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_telemetry_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*TelemetryRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_telemetry_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*TelemetryResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_telemetry_proto_rawDesc,
NumEnums: 0,
NumMessages: 3,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_telemetry_proto_goTypes,
DependencyIndexes: file_telemetry_proto_depIdxs,
MessageInfos: file_telemetry_proto_msgTypes,
}.Build()
File_telemetry_proto = out.File
file_telemetry_proto_rawDesc = nil
file_telemetry_proto_goTypes = nil
file_telemetry_proto_depIdxs = nil
}

@@ -0,0 +1,52 @@
syntax = "proto3";
package telemetry;
option go_package = "github.com/seaweedfs/seaweedfs/telemetry/proto";
// TelemetryData represents cluster-level telemetry information
message TelemetryData {
// Unique cluster identifier (generated in-memory)
string cluster_id = 1;
// SeaweedFS version
string version = 2;
// Operating system (e.g., "linux/amd64")
string os = 3;
// Field 4 reserved (was features)
reserved 4;
// Field 5 reserved (was deployment)
reserved 5;
// Number of volume servers in the cluster
int32 volume_server_count = 6;
// Total disk usage across all volume servers (in bytes)
uint64 total_disk_bytes = 7;
// Total number of volumes in the cluster
int32 total_volume_count = 8;
// Number of filer servers in the cluster
int32 filer_count = 9;
// Number of broker servers in the cluster
int32 broker_count = 10;
// Unix timestamp when the data was collected
int64 timestamp = 11;
}
// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server
message TelemetryRequest {
TelemetryData data = 1;
}
// TelemetryResponse is returned by the telemetry server
message TelemetryResponse {
bool success = 1;
string message = 2;
}
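As an illustration of the wire protocol defined above, here is a minimal standalone sender, not part of this diff: it assumes the server's /api/collect route and the application/x-protobuf content type (both visible in the handler further down) and uses placeholder field values; the real reporting client lives in weed/telemetry and generates the cluster id itself.

package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/seaweedfs/seaweedfs/telemetry/proto"
	protobuf "google.golang.org/protobuf/proto"
)

func main() {
	// Wrap one TelemetryData sample in a TelemetryRequest, as the server expects.
	req := &proto.TelemetryRequest{
		Data: &proto.TelemetryData{
			ClusterId:         "example-cluster", // placeholder; the real client generates this
			Version:           "3.89",            // placeholder version string
			Os:                "linux/amd64",
			VolumeServerCount: 3,
			Timestamp:         time.Now().Unix(),
		},
	}
	body, err := protobuf.Marshal(req)
	if err != nil {
		panic(err)
	}
	// The handler rejects anything that is not protobuf-encoded.
	resp, err := http.Post("http://localhost:8080/api/collect",
		"application/x-protobuf", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.StatusCode)
}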

@@ -0,0 +1,18 @@
FROM golang:1.21-alpine AS builder
WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download
COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o telemetry-server .
FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/
COPY --from=builder /app/telemetry-server .
EXPOSE 8080
CMD ["./telemetry-server"]
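A brief note on this image, not from the diff itself: the builder stage compiles a statically linked binary (CGO_ENABLED=0 together with -extldflags "-static"), which is why the runtime stage can be a bare alpine image that only adds CA certificates for outbound TLS.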

telemetry/server/Makefile Normal file
@@ -0,0 +1,97 @@
.PHONY: build run clean test deps proto integration-test test-all

# Build the telemetry server
build:
	go build -o telemetry-server .

# Run the server in development mode
run:
	go run . -port=8080 -dashboard=true -cleanup=1h -max-age=24h

# Run the server in production mode
run-prod:
	./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h

# Clean build artifacts
clean:
	rm -f telemetry-server
	rm -f ../test/telemetry-server-test.log
	go clean

# Run unit tests
test:
	go test ./...

# Run integration tests
integration-test:
	@echo "🧪 Running telemetry integration tests..."
	cd ../../ && go run telemetry/test/integration.go

# Run all tests (unit + integration)
test-all: test integration-test

# Install dependencies
deps:
	go mod download
	go mod tidy

# Generate protobuf code (requires protoc)
proto:
	cd .. && protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto

# Build Docker image
docker-build:
	docker build -t seaweedfs-telemetry .

# Run with Docker
docker-run:
	docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true

# Development with auto-reload (requires air: go install github.com/cosmtrek/air@latest)
dev:
	air

# Check if protoc is available
check-protoc:
	@which protoc > /dev/null || (echo "protoc is required for proto generation. Install from https://grpc.io/docs/protoc-installation/" && exit 1)

# Full development setup
setup: check-protoc deps proto build

# Run a quick smoke test
smoke-test: build
	@echo "🔥 Running smoke test..."
	@timeout 10s ./telemetry-server -port=18081 > /dev/null 2>&1 & \
	SERVER_PID=$$!; \
	sleep 2; \
	if curl -s http://localhost:18081/health > /dev/null; then \
		echo "✅ Smoke test passed - server responds to health check"; \
	else \
		echo "❌ Smoke test failed - server not responding"; \
		exit 1; \
	fi; \
	kill $$SERVER_PID 2>/dev/null || true

# Continuous integration target
ci: deps proto build test integration-test
	@echo "🎉 All CI tests passed!"

# Help
help:
	@echo "Available targets:"
	@echo "  build            - Build the telemetry server binary"
	@echo "  run              - Run server in development mode"
	@echo "  run-prod         - Run server in production mode"
	@echo "  clean            - Clean build artifacts"
	@echo "  test             - Run unit tests"
	@echo "  integration-test - Run integration tests"
	@echo "  test-all         - Run all tests (unit + integration)"
	@echo "  deps             - Install Go dependencies"
	@echo "  proto            - Generate protobuf code"
	@echo "  docker-build     - Build Docker image"
	@echo "  docker-run       - Run with Docker"
	@echo "  dev              - Run with auto-reload (requires air)"
	@echo "  smoke-test       - Quick server health check"
	@echo "  setup            - Full development setup"
	@echo "  ci               - Continuous integration (all tests)"
	@echo "  help             - Show this help"

@@ -0,0 +1,152 @@
package api
import (
"encoding/json"
"io"
"net/http"
"strconv"
"time"
"github.com/seaweedfs/seaweedfs/telemetry/proto"
"github.com/seaweedfs/seaweedfs/telemetry/server/storage"
protobuf "google.golang.org/protobuf/proto"
)
type Handler struct {
storage *storage.PrometheusStorage
}
func NewHandler(storage *storage.PrometheusStorage) *Handler {
return &Handler{storage: storage}
}
func (h *Handler) CollectTelemetry(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
contentType := r.Header.Get("Content-Type")
// Only accept protobuf content type
if contentType != "application/x-protobuf" && contentType != "application/protobuf" {
http.Error(w, "Content-Type must be application/x-protobuf", http.StatusUnsupportedMediaType)
return
}
// Read protobuf request
body, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, "Failed to read request body", http.StatusBadRequest)
return
}
req := &proto.TelemetryRequest{}
if err := protobuf.Unmarshal(body, req); err != nil {
http.Error(w, "Invalid protobuf data", http.StatusBadRequest)
return
}
data := req.Data
if data == nil {
http.Error(w, "Missing telemetry data", http.StatusBadRequest)
return
}
// Validate required fields
if data.ClusterId == "" || data.Version == "" || data.Os == "" {
http.Error(w, "Missing required fields", http.StatusBadRequest)
return
}
// Set timestamp if not provided
if data.Timestamp == 0 {
data.Timestamp = time.Now().Unix()
}
// Store the telemetry data
if err := h.storage.StoreTelemetry(data); err != nil {
http.Error(w, "Failed to store data", http.StatusInternalServerError)
return
}
// Return protobuf response
resp := &proto.TelemetryResponse{
Success: true,
Message: "Telemetry data received",
}
respData, err := protobuf.Marshal(resp)
if err != nil {
http.Error(w, "Failed to marshal response", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/x-protobuf")
w.WriteHeader(http.StatusOK)
w.Write(respData)
}
func (h *Handler) GetStats(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
stats, err := h.storage.GetStats()
if err != nil {
http.Error(w, "Failed to get stats", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(stats)
}
func (h *Handler) GetInstances(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
limitStr := r.URL.Query().Get("limit")
limit := 100 // default
if limitStr != "" {
if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 {
limit = l
}
}
instances, err := h.storage.GetInstances(limit)
if err != nil {
http.Error(w, "Failed to get instances", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(instances)
}
func (h *Handler) GetMetrics(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodGet {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
daysStr := r.URL.Query().Get("days")
days := 30 // default
if daysStr != "" {
if d, err := strconv.Atoi(daysStr); err == nil && d > 0 && d <= 365 {
days = d
}
}
metrics, err := h.storage.GetMetrics(days)
if err != nil {
http.Error(w, "Failed to get metrics", http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(metrics)
}

@@ -0,0 +1,274 @@
package dashboard
import (
"net/http"
)
type Handler struct{}
func NewHandler() *Handler {
return &Handler{}
}
func (h *Handler) ServeIndex(w http.ResponseWriter, r *http.Request) {
html := `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>SeaweedFS Telemetry Dashboard</title>
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
margin: 0;
padding: 20px;
background-color: #f5f5f5;
}
.container {
max-width: 1200px;
margin: 0 auto;
}
.header {
background: white;
padding: 20px;
border-radius: 8px;
margin-bottom: 20px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.stats-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
gap: 20px;
margin-bottom: 20px;
}
.stat-card {
background: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.stat-value {
font-size: 2em;
font-weight: bold;
color: #2196F3;
}
.stat-label {
color: #666;
margin-top: 5px;
}
.chart-container {
background: white;
padding: 20px;
border-radius: 8px;
margin-bottom: 20px;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}
.chart-title {
font-size: 1.2em;
font-weight: bold;
margin-bottom: 15px;
}
.loading {
text-align: center;
padding: 40px;
color: #666;
}
.error {
background: #ffebee;
color: #c62828;
padding: 15px;
border-radius: 4px;
margin: 10px 0;
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>SeaweedFS Telemetry Dashboard</h1>
<p>Privacy-respecting usage analytics for SeaweedFS</p>
</div>
<div id="loading" class="loading">Loading telemetry data...</div>
<div id="error" class="error" style="display: none;"></div>
<div id="dashboard" style="display: none;">
<div class="stats-grid">
<div class="stat-card">
<div class="stat-value" id="totalInstances">-</div>
<div class="stat-label">Total Instances (30 days)</div>
</div>
<div class="stat-card">
<div class="stat-value" id="activeInstances">-</div>
<div class="stat-label">Active Instances (7 days)</div>
</div>
<div class="stat-card">
<div class="stat-value" id="totalVersions">-</div>
<div class="stat-label">Different Versions</div>
</div>
<div class="stat-card">
<div class="stat-value" id="totalOS">-</div>
<div class="stat-label">Operating Systems</div>
</div>
</div>
<div class="chart-container">
<div class="chart-title">Version Distribution</div>
<canvas id="versionChart" width="400" height="200"></canvas>
</div>
<div class="chart-container">
<div class="chart-title">Operating System Distribution</div>
<canvas id="osChart" width="400" height="200"></canvas>
</div>
<div class="chart-container">
<div class="chart-title">Volume Servers Over Time</div>
<canvas id="serverChart" width="400" height="200"></canvas>
</div>
<div class="chart-container">
<div class="chart-title">Total Disk Usage Over Time</div>
<canvas id="diskChart" width="400" height="200"></canvas>
</div>
</div>
</div>
<script>
let charts = {};
async function loadDashboard() {
try {
// Load stats
const statsResponse = await fetch('/api/stats');
const stats = await statsResponse.json();
// Load metrics
const metricsResponse = await fetch('/api/metrics?days=30');
const metrics = await metricsResponse.json();
updateStats(stats);
updateCharts(stats, metrics);
document.getElementById('loading').style.display = 'none';
document.getElementById('dashboard').style.display = 'block';
} catch (error) {
console.error('Error loading dashboard:', error);
showError('Failed to load telemetry data: ' + error.message);
}
}
function updateStats(stats) {
document.getElementById('totalInstances').textContent = stats.total_instances || 0;
document.getElementById('activeInstances').textContent = stats.active_instances || 0;
document.getElementById('totalVersions').textContent = Object.keys(stats.versions || {}).length;
document.getElementById('totalOS').textContent = Object.keys(stats.os_distribution || {}).length;
}
function updateCharts(stats, metrics) {
// Version chart
createPieChart('versionChart', 'Version Distribution', stats.versions || {});
// OS chart
createPieChart('osChart', 'Operating System Distribution', stats.os_distribution || {});
// Server count over time
if (metrics.dates && metrics.server_counts) {
createLineChart('serverChart', 'Volume Servers', metrics.dates, metrics.server_counts, '#2196F3');
}
// Disk usage over time
if (metrics.dates && metrics.disk_usage) {
const diskUsageGB = metrics.disk_usage.map(bytes => Math.round(bytes / (1024 * 1024 * 1024)));
createLineChart('diskChart', 'Disk Usage (GB)', metrics.dates, diskUsageGB, '#4CAF50');
}
}
function createPieChart(canvasId, title, data) {
const ctx = document.getElementById(canvasId).getContext('2d');
if (charts[canvasId]) {
charts[canvasId].destroy();
}
const labels = Object.keys(data);
const values = Object.values(data);
charts[canvasId] = new Chart(ctx, {
type: 'pie',
data: {
labels: labels,
datasets: [{
data: values,
backgroundColor: [
'#FF6384', '#36A2EB', '#FFCE56', '#4BC0C0',
'#9966FF', '#FF9F40', '#FF6384', '#C9CBCF'
]
}]
},
options: {
responsive: true,
plugins: {
legend: {
position: 'bottom'
}
}
}
});
}
function createLineChart(canvasId, label, labels, data, color) {
const ctx = document.getElementById(canvasId).getContext('2d');
if (charts[canvasId]) {
charts[canvasId].destroy();
}
charts[canvasId] = new Chart(ctx, {
type: 'line',
data: {
labels: labels,
datasets: [{
label: label,
data: data,
borderColor: color,
backgroundColor: color + '20',
fill: true,
tension: 0.1
}]
},
options: {
responsive: true,
scales: {
y: {
beginAtZero: true
}
}
}
});
}
function showError(message) {
document.getElementById('loading').style.display = 'none';
document.getElementById('error').style.display = 'block';
document.getElementById('error').textContent = message;
}
// Load dashboard on page load
loadDashboard();
// Refresh every 5 minutes
setInterval(loadDashboard, 5 * 60 * 1000);
</script>
</body>
</html>`
w.Header().Set("Content-Type", "text/html")
w.WriteHeader(http.StatusOK)
w.Write([]byte(html))
}

telemetry/server/go.sum Normal file
@@ -0,0 +1,31 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=

telemetry/server/main.go Normal file
@@ -0,0 +1,111 @@
package main
import (
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/seaweedfs/seaweedfs/telemetry/server/api"
"github.com/seaweedfs/seaweedfs/telemetry/server/dashboard"
"github.com/seaweedfs/seaweedfs/telemetry/server/storage"
)
var (
port = flag.Int("port", 8080, "HTTP server port")
enableCORS = flag.Bool("cors", true, "Enable CORS for dashboard")
logRequests = flag.Bool("log", true, "Log incoming requests")
enableDashboard = flag.Bool("dashboard", true, "Enable built-in dashboard (optional when using Grafana)")
cleanupInterval = flag.Duration("cleanup", 24*time.Hour, "Cleanup interval for old instances")
maxInstanceAge = flag.Duration("max-age", 30*24*time.Hour, "Maximum age for instances before cleanup")
)
func main() {
flag.Parse()
// Create Prometheus storage instance
store := storage.NewPrometheusStorage()
// Start cleanup routine
go func() {
ticker := time.NewTicker(*cleanupInterval)
defer ticker.Stop()
for range ticker.C {
store.CleanupOldInstances(*maxInstanceAge)
}
}()
// Setup HTTP handlers
mux := http.NewServeMux()
// Prometheus metrics endpoint
mux.Handle("/metrics", promhttp.Handler())
// API endpoints
apiHandler := api.NewHandler(store)
mux.HandleFunc("/api/collect", corsMiddleware(logMiddleware(apiHandler.CollectTelemetry)))
mux.HandleFunc("/api/stats", corsMiddleware(logMiddleware(apiHandler.GetStats)))
mux.HandleFunc("/api/instances", corsMiddleware(logMiddleware(apiHandler.GetInstances)))
mux.HandleFunc("/api/metrics", corsMiddleware(logMiddleware(apiHandler.GetMetrics)))
// Dashboard (optional)
if *enableDashboard {
dashboardHandler := dashboard.NewHandler()
mux.HandleFunc("/", corsMiddleware(dashboardHandler.ServeIndex))
mux.HandleFunc("/dashboard", corsMiddleware(dashboardHandler.ServeIndex))
mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
}
// Health check
mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(map[string]string{
"status": "ok",
"time": time.Now().UTC().Format(time.RFC3339),
})
})
addr := fmt.Sprintf(":%d", *port)
log.Printf("Starting telemetry server on %s", addr)
log.Printf("Prometheus metrics: http://localhost%s/metrics", addr)
if *enableDashboard {
log.Printf("Dashboard: http://localhost%s/dashboard", addr)
}
log.Printf("Cleanup interval: %v, Max instance age: %v", *cleanupInterval, *maxInstanceAge)
if err := http.ListenAndServe(addr, mux); err != nil {
log.Fatalf("Server failed: %v", err)
}
}
func corsMiddleware(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if *enableCORS {
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
}
if r.Method == "OPTIONS" {
w.WriteHeader(http.StatusOK)
return
}
next(w, r)
}
}
func logMiddleware(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if *logRequests {
start := time.Now()
next(w, r)
log.Printf("%s %s %s %v", r.Method, r.URL.Path, r.RemoteAddr, time.Since(start))
} else {
next(w, r)
}
}
}

@@ -0,0 +1,235 @@
package storage
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/seaweedfs/seaweedfs/telemetry/proto"
)
type PrometheusStorage struct {
// Prometheus metrics
totalClusters prometheus.Gauge
activeClusters prometheus.Gauge
volumeServerCount *prometheus.GaugeVec
totalDiskBytes *prometheus.GaugeVec
totalVolumeCount *prometheus.GaugeVec
filerCount *prometheus.GaugeVec
brokerCount *prometheus.GaugeVec
clusterInfo *prometheus.GaugeVec
telemetryReceived prometheus.Counter
// In-memory storage for API endpoints (if needed)
mu sync.RWMutex
instances map[string]*telemetryData
stats map[string]interface{}
}
// telemetryData is an internal struct that includes the received timestamp
type telemetryData struct {
*proto.TelemetryData
ReceivedAt time.Time `json:"received_at"`
}
func NewPrometheusStorage() *PrometheusStorage {
return &PrometheusStorage{
totalClusters: promauto.NewGauge(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_total_clusters",
Help: "Total number of unique SeaweedFS clusters (last 30 days)",
}),
activeClusters: promauto.NewGauge(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_active_clusters",
Help: "Number of active SeaweedFS clusters (last 7 days)",
}),
volumeServerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_volume_servers",
Help: "Number of volume servers per cluster",
}, []string{"cluster_id", "version", "os"}),
totalDiskBytes: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_disk_bytes",
Help: "Total disk usage in bytes per cluster",
}, []string{"cluster_id", "version", "os"}),
totalVolumeCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_volume_count",
Help: "Total number of volumes per cluster",
}, []string{"cluster_id", "version", "os"}),
filerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_filer_count",
Help: "Number of filer servers per cluster",
}, []string{"cluster_id", "version", "os"}),
brokerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_broker_count",
Help: "Number of broker servers per cluster",
}, []string{"cluster_id", "version", "os"}),
clusterInfo: promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "seaweedfs_telemetry_cluster_info",
Help: "Cluster information (always 1, labels contain metadata)",
}, []string{"cluster_id", "version", "os"}),
telemetryReceived: promauto.NewCounter(prometheus.CounterOpts{
Name: "seaweedfs_telemetry_reports_received_total",
Help: "Total number of telemetry reports received",
}),
instances: make(map[string]*telemetryData),
stats: make(map[string]interface{}),
}
}
func (s *PrometheusStorage) StoreTelemetry(data *proto.TelemetryData) error {
s.mu.Lock()
defer s.mu.Unlock()
// Update Prometheus metrics
labels := prometheus.Labels{
"cluster_id": data.ClusterId,
"version": data.Version,
"os": data.Os,
}
s.volumeServerCount.With(labels).Set(float64(data.VolumeServerCount))
s.totalDiskBytes.With(labels).Set(float64(data.TotalDiskBytes))
s.totalVolumeCount.With(labels).Set(float64(data.TotalVolumeCount))
s.filerCount.With(labels).Set(float64(data.FilerCount))
s.brokerCount.With(labels).Set(float64(data.BrokerCount))
infoLabels := prometheus.Labels{
"cluster_id": data.ClusterId,
"version": data.Version,
"os": data.Os,
}
s.clusterInfo.With(infoLabels).Set(1)
s.telemetryReceived.Inc()
// Store in memory for API endpoints
s.instances[data.ClusterId] = &telemetryData{
TelemetryData: data,
ReceivedAt: time.Now().UTC(),
}
// Update aggregated stats
s.updateStats()
return nil
}
func (s *PrometheusStorage) GetStats() (map[string]interface{}, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Return cached stats
result := make(map[string]interface{})
for k, v := range s.stats {
result[k] = v
}
return result, nil
}
func (s *PrometheusStorage) GetInstances(limit int) ([]*telemetryData, error) {
s.mu.RLock()
defer s.mu.RUnlock()
var instances []*telemetryData
count := 0
for _, instance := range s.instances {
if count >= limit {
break
}
instances = append(instances, instance)
count++
}
return instances, nil
}
func (s *PrometheusStorage) GetMetrics(days int) (map[string]interface{}, error) {
s.mu.RLock()
defer s.mu.RUnlock()
// Return current metrics from in-memory storage
// Historical data should be queried from Prometheus directly
cutoff := time.Now().AddDate(0, 0, -days)
var volumeServers []map[string]interface{}
var diskUsage []map[string]interface{}
for _, instance := range s.instances {
if instance.ReceivedAt.After(cutoff) {
volumeServers = append(volumeServers, map[string]interface{}{
"date": instance.ReceivedAt.Format("2006-01-02"),
"value": instance.TelemetryData.VolumeServerCount,
})
diskUsage = append(diskUsage, map[string]interface{}{
"date": instance.ReceivedAt.Format("2006-01-02"),
"value": instance.TelemetryData.TotalDiskBytes,
})
}
}
return map[string]interface{}{
"volume_servers": volumeServers,
"disk_usage": diskUsage,
}, nil
}
func (s *PrometheusStorage) updateStats() {
now := time.Now()
last7Days := now.AddDate(0, 0, -7)
last30Days := now.AddDate(0, 0, -30)
totalInstances := 0
activeInstances := 0
versions := make(map[string]int)
osDistribution := make(map[string]int)
for _, instance := range s.instances {
if instance.ReceivedAt.After(last30Days) {
totalInstances++
}
if instance.ReceivedAt.After(last7Days) {
activeInstances++
versions[instance.TelemetryData.Version]++
osDistribution[instance.TelemetryData.Os]++
}
}
// Update Prometheus gauges
s.totalClusters.Set(float64(totalInstances))
s.activeClusters.Set(float64(activeInstances))
// Update cached stats for API
s.stats = map[string]interface{}{
"total_instances": totalInstances,
"active_instances": activeInstances,
"versions": versions,
"os_distribution": osDistribution,
}
}
// CleanupOldInstances removes instances older than the specified duration
func (s *PrometheusStorage) CleanupOldInstances(maxAge time.Duration) {
s.mu.Lock()
defer s.mu.Unlock()
cutoff := time.Now().Add(-maxAge)
for instanceID, instance := range s.instances {
if instance.ReceivedAt.Before(cutoff) {
delete(s.instances, instanceID)
// Remove from Prometheus metrics
labels := prometheus.Labels{
"cluster_id": instance.TelemetryData.ClusterId,
"version": instance.TelemetryData.Version,
"os": instance.TelemetryData.Os,
}
s.volumeServerCount.Delete(labels)
s.totalDiskBytes.Delete(labels)
s.totalVolumeCount.Delete(labels)
s.filerCount.Delete(labels)
s.brokerCount.Delete(labels)
}
}
s.updateStats()
}
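One detail worth calling out, as an editorial note rather than part of the diff: CleanupOldInstances removes a departed cluster's labeled series with GaugeVec.Delete, which only deletes a series whose label values match exactly, so the cleanup path must rebuild the same cluster_id, version, and os labels that StoreTelemetry used; updateStats then runs to refresh the aggregate total/active counts.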

@@ -0,0 +1,311 @@
package main
import (
"context"
"fmt"
"io"
"log"
"net/http"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/seaweedfs/seaweedfs/telemetry/proto"
"github.com/seaweedfs/seaweedfs/weed/telemetry"
protobuf "google.golang.org/protobuf/proto"
)
const (
serverPort = "18080" // Use different port to avoid conflicts
serverURL = "http://localhost:" + serverPort
)
func main() {
fmt.Println("🧪 Starting SeaweedFS Telemetry Integration Test")
// Start telemetry server
fmt.Println("📡 Starting telemetry server...")
serverCmd, err := startTelemetryServer()
if err != nil {
log.Fatalf("❌ Failed to start telemetry server: %v", err)
}
defer stopServer(serverCmd)
// Wait for server to start
if !waitForServer(serverURL+"/health", 15*time.Second) {
log.Fatal("❌ Telemetry server failed to start")
}
fmt.Println("✅ Telemetry server started successfully")
// Test protobuf marshaling first
fmt.Println("🔧 Testing protobuf marshaling...")
if err := testProtobufMarshaling(); err != nil {
log.Fatalf("❌ Protobuf marshaling test failed: %v", err)
}
fmt.Println("✅ Protobuf marshaling test passed")
// Test protobuf client
fmt.Println("🔄 Testing protobuf telemetry client...")
if err := testTelemetryClient(); err != nil {
log.Fatalf("❌ Telemetry client test failed: %v", err)
}
fmt.Println("✅ Telemetry client test passed")
// Test server metrics endpoint
fmt.Println("📊 Testing Prometheus metrics endpoint...")
if err := testMetricsEndpoint(); err != nil {
log.Fatalf("❌ Metrics endpoint test failed: %v", err)
}
fmt.Println("✅ Metrics endpoint test passed")
// Test stats API
fmt.Println("📈 Testing stats API...")
if err := testStatsAPI(); err != nil {
log.Fatalf("❌ Stats API test failed: %v", err)
}
fmt.Println("✅ Stats API test passed")
// Test instances API
fmt.Println("📋 Testing instances API...")
if err := testInstancesAPI(); err != nil {
log.Fatalf("❌ Instances API test failed: %v", err)
}
fmt.Println("✅ Instances API test passed")
fmt.Println("🎉 All telemetry integration tests passed!")
}
func startTelemetryServer() (*exec.Cmd, error) {
// Get the directory where this test is running
testDir, err := os.Getwd()
if err != nil {
return nil, fmt.Errorf("failed to get working directory: %v", err)
}
// Navigate to the server directory (from main seaweedfs directory)
serverDir := filepath.Join(testDir, "telemetry", "server")
cmd := exec.Command("go", "run", ".",
"-port="+serverPort,
"-dashboard=false",
"-cleanup=1m",
"-max-age=1h")
cmd.Dir = serverDir
// Create log files for server output
logFile, err := os.Create("telemetry-server-test.log")
if err != nil {
return nil, fmt.Errorf("failed to create log file: %v", err)
}
cmd.Stdout = logFile
cmd.Stderr = logFile
if err := cmd.Start(); err != nil {
return nil, fmt.Errorf("failed to start server: %v", err)
}
return cmd, nil
}
func stopServer(cmd *exec.Cmd) {
if cmd != nil && cmd.Process != nil {
cmd.Process.Signal(syscall.SIGTERM)
cmd.Wait()
// Clean up log file
os.Remove("telemetry-server-test.log")
}
}
func waitForServer(url string, timeout time.Duration) bool {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
fmt.Printf("⏳ Waiting for server at %s...\n", url)
for {
select {
case <-ctx.Done():
return false
default:
resp, err := http.Get(url)
if err == nil {
resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return true
}
}
time.Sleep(500 * time.Millisecond)
}
}
}
func testProtobufMarshaling() error {
// Test protobuf marshaling/unmarshaling
testData := &proto.TelemetryData{
ClusterId: "test-cluster-12345",
Version: "test-3.45",
Os: "linux/amd64",
VolumeServerCount: 2,
TotalDiskBytes: 1000000,
TotalVolumeCount: 10,
FilerCount: 1,
BrokerCount: 1,
Timestamp: time.Now().Unix(),
}
// Marshal
data, err := protobuf.Marshal(testData)
if err != nil {
return fmt.Errorf("failed to marshal protobuf: %v", err)
}
fmt.Printf(" Protobuf size: %d bytes\n", len(data))
// Unmarshal
testData2 := &proto.TelemetryData{}
if err := protobuf.Unmarshal(data, testData2); err != nil {
return fmt.Errorf("failed to unmarshal protobuf: %v", err)
}
// Verify data
if testData2.ClusterId != testData.ClusterId {
return fmt.Errorf("protobuf data mismatch: expected %s, got %s",
testData.ClusterId, testData2.ClusterId)
}
if testData2.VolumeServerCount != testData.VolumeServerCount {
return fmt.Errorf("volume server count mismatch: expected %d, got %d",
testData.VolumeServerCount, testData2.VolumeServerCount)
}
return nil
}
func testTelemetryClient() error {
// Create telemetry client
client := telemetry.NewClient(serverURL+"/api/collect", true)
// Create test data using protobuf format
testData := &proto.TelemetryData{
Version: "test-3.45",
Os: "linux/amd64",
VolumeServerCount: 3,
TotalDiskBytes: 1073741824, // 1GB
TotalVolumeCount: 50,
FilerCount: 2,
BrokerCount: 1,
Timestamp: time.Now().Unix(),
}
// Send telemetry data
if err := client.SendTelemetry(testData); err != nil {
return fmt.Errorf("failed to send telemetry: %v", err)
}
fmt.Printf(" Sent telemetry for cluster: %s\n", client.GetInstanceID())
// Wait a bit for processing
time.Sleep(2 * time.Second)
return nil
}
func testMetricsEndpoint() error {
resp, err := http.Get(serverURL + "/metrics")
if err != nil {
return fmt.Errorf("failed to get metrics: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("metrics endpoint returned status %d", resp.StatusCode)
}
// Read response and check for expected metrics
content, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read metrics response: %v", err)
}
contentStr := string(content)
expectedMetrics := []string{
"seaweedfs_telemetry_total_clusters",
"seaweedfs_telemetry_active_clusters",
"seaweedfs_telemetry_reports_received_total",
"seaweedfs_telemetry_volume_servers",
"seaweedfs_telemetry_disk_bytes",
"seaweedfs_telemetry_volume_count",
"seaweedfs_telemetry_filer_count",
"seaweedfs_telemetry_broker_count",
}
for _, metric := range expectedMetrics {
if !strings.Contains(contentStr, metric) {
return fmt.Errorf("missing expected metric: %s", metric)
}
}
// Check that we have at least one report received
if !strings.Contains(contentStr, "seaweedfs_telemetry_reports_received_total 1") {
fmt.Printf(" Warning: Expected at least 1 report received, metrics content:\n%s\n", contentStr)
}
fmt.Printf(" Found %d expected metrics\n", len(expectedMetrics))
return nil
}
func testStatsAPI() error {
resp, err := http.Get(serverURL + "/api/stats")
if err != nil {
return fmt.Errorf("failed to get stats: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("stats API returned status %d", resp.StatusCode)
}
// Read and verify JSON response
content, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read stats response: %v", err)
}
contentStr := string(content)
if !strings.Contains(contentStr, "total_instances") {
return fmt.Errorf("stats response missing total_instances field")
}
fmt.Printf(" Stats response: %s\n", contentStr)
return nil
}
func testInstancesAPI() error {
resp, err := http.Get(serverURL + "/api/instances?limit=10")
if err != nil {
return fmt.Errorf("failed to get instances: %v", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("instances API returned status %d", resp.StatusCode)
}
// Read response
content, err := io.ReadAll(resp.Body)
if err != nil {
return fmt.Errorf("failed to read instances response: %v", err)
}
fmt.Printf(" Instances response length: %d bytes\n", len(content))
return nil
}

@@ -1,13 +1,13 @@
package main
import (
"context"
"flag"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"log"
"math/rand"
"time"
"context"
"google.golang.org/grpc"
@@ -56,7 +56,7 @@ func main() {
}
func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
assignResult, err := operation.Assign(func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{
assignResult, err := operation.Assign(context.Background(), func(_ context.Context) pb.ServerAddress { return pb.ServerAddress(*master) }, grpcDialOption, &operation.VolumeAssignRequest{
Count: 1,
Replication: *replication,
})
@@ -84,7 +84,7 @@ func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, st
log.Fatalf("upload: %v", err)
}
_, err = uploader.UploadData(data, uploadOption)
_, err = uploader.UploadData(context.Background(), data, uploadOption)
if err != nil {
log.Fatalf("upload: %v", err)
}

@@ -1,12 +1,13 @@
package lock_manager
import (
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
"sort"
"sync"
"time"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util"
)
type LockRingSnapshot struct {
@@ -22,6 +23,7 @@ type LockRing struct {
lastCompactTime time.Time
snapshotInterval time.Duration
onTakeSnapshot func(snapshot []pb.ServerAddress)
cleanupWg sync.WaitGroup
}
func NewLockRing(snapshotInterval time.Duration) *LockRing {
@@ -87,7 +89,9 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {
r.addOneSnapshot(servers)
r.cleanupWg.Add(1)
go func() {
defer r.cleanupWg.Done()
<-time.After(r.snapshotInterval)
r.compactSnapshots()
}()
@@ -96,7 +100,9 @@
func (r *LockRing) takeSnapshotWithDelayedCompaction() {
r.doTakeSnapshot()
r.cleanupWg.Add(1)
go func() {
defer r.cleanupWg.Done()
<-time.After(r.snapshotInterval)
r.compactSnapshots()
}()
@@ -172,6 +178,19 @@ func (r *LockRing) GetSnapshot() (servers []pb.ServerAddress) {
return r.snapshots[0].servers
}
// WaitForCleanup waits for all pending cleanup operations to complete
// This is useful for testing to ensure deterministic behavior
func (r *LockRing) WaitForCleanup() {
r.cleanupWg.Wait()
}
// GetSnapshotCount safely returns the number of snapshots for testing
func (r *LockRing) GetSnapshotCount() int {
r.RLock()
defer r.RUnlock()
return len(r.snapshots)
}
func hashKeyToServer(key string, servers []pb.ServerAddress) pb.ServerAddress {
if len(servers) == 0 {
return ""

@@ -1,43 +1,91 @@
package lock_manager
import (
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/stretchr/testify/assert"
)
func TestAddServer(t *testing.T) {
r := NewLockRing(100 * time.Millisecond)
// Add servers
r.AddServer("localhost:8080")
assert.Equal(t, 1, len(r.snapshots))
r.AddServer("localhost:8081")
r.AddServer("localhost:8082")
r.AddServer("localhost:8083")
r.AddServer("localhost:8084")
// Verify all servers are present
servers := r.GetSnapshot()
assert.Equal(t, 5, len(servers))
assert.Contains(t, servers, pb.ServerAddress("localhost:8080"))
assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
assert.Contains(t, servers, pb.ServerAddress("localhost:8082"))
assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
assert.Contains(t, servers, pb.ServerAddress("localhost:8084"))
// Remove servers
r.RemoveServer("localhost:8084")
r.RemoveServer("localhost:8082")
r.RemoveServer("localhost:8080")
assert.Equal(t, 8, len(r.snapshots))
// Wait for all cleanup operations to complete
r.WaitForCleanup()
// Verify only 2 servers remain (localhost:8081 and localhost:8083)
servers = r.GetSnapshot()
assert.Equal(t, 2, len(servers))
assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
// Verify cleanup has happened - wait for snapshot interval and check snapshots are compacted
time.Sleep(110 * time.Millisecond)
r.WaitForCleanup()
assert.Equal(t, 2, len(r.snapshots))
// Verify snapshot history is cleaned up properly (should have at most 2 snapshots after compaction)
snapshotCount := r.GetSnapshotCount()
assert.LessOrEqual(t, snapshotCount, 2, "Snapshot history should be compacted")
}
func TestLockRing(t *testing.T) {
r := NewLockRing(100 * time.Millisecond)
// Test initial snapshot
r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081"})
assert.Equal(t, 1, len(r.snapshots))
assert.Equal(t, 1, r.GetSnapshotCount())
servers := r.GetSnapshot()
assert.Equal(t, 2, len(servers))
assert.Contains(t, servers, pb.ServerAddress("localhost:8080"))
assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
// Add another server
r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082"})
assert.Equal(t, 2, len(r.snapshots))
assert.Equal(t, 2, r.GetSnapshotCount())
servers = r.GetSnapshot()
assert.Equal(t, 3, len(servers))
assert.Contains(t, servers, pb.ServerAddress("localhost:8082"))
// Wait for cleanup interval and add another server
time.Sleep(110 * time.Millisecond)
r.WaitForCleanup()
r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083"})
assert.Equal(t, 3, len(r.snapshots))
assert.LessOrEqual(t, r.GetSnapshotCount(), 3)
servers = r.GetSnapshot()
assert.Equal(t, 4, len(servers))
assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
// Wait for cleanup and verify compaction
time.Sleep(110 * time.Millisecond)
assert.Equal(t, 2, len(r.snapshots))
r.WaitForCleanup()
assert.LessOrEqual(t, r.GetSnapshotCount(), 2, "Snapshots should be compacted")
// Add final server
r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083", "localhost:8084"})
assert.Equal(t, 3, len(r.snapshots))
servers = r.GetSnapshot()
assert.Equal(t, 5, len(servers))
assert.Contains(t, servers, pb.ServerAddress("localhost:8084"))
assert.LessOrEqual(t, r.GetSnapshotCount(), 3)
}

@@ -115,7 +115,10 @@ func runBackup(cmd *Command, args []string) bool {
return true
}
}
v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
ver := needle.Version(stats.Version)
v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0)
if err != nil {
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
return true
@@ -142,7 +145,7 @@
fmt.Printf("Error destroying volume: %v\n", err)
}
// recreate an empty volume
v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0)
if err != nil {
fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
return true

@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/pb"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"io"
"math"
"math/rand"
@@ -115,7 +116,7 @@ func runBenchmark(cmd *Command, args []string) bool {
util.LoadSecurityConfiguration()
b.grpcDialOption = security.LoadClientTLS(util.GetViper(), "grpc.client")
fmt.Printf("This is SeaweedFS version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
fmt.Printf("This is SeaweedFS version %s %s %s\n", version.Version(), runtime.GOOS, runtime.GOARCH)
if *b.maxCpu < 1 {
*b.maxCpu = runtime.NumCPU()
}
@@ -241,7 +242,7 @@ func writeFiles(idChan chan int, fileIdLineChan chan string, s *stat) {
Replication: *b.replication,
DiskType: *b.diskType,
}
if assignResult, err := operation.Assign(b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
if assignResult, err := operation.Assign(context.Background(), b.masterClient.GetMaster, b.grpcDialOption, ar); err == nil {
fp.Server, fp.Fid, fp.Pref.Collection = assignResult.Url, assignResult.Fid, *b.collection
if !isSecure && assignResult.Auth != "" {
isSecure = true
@@ -288,7 +289,7 @@ func readFiles(fileIdLineChan chan string, s *stat) {
start := time.Now()
var bytesRead int
var err error
urls, err := b.masterClient.LookupFileId(fid)
urls, err := b.masterClient.LookupFileId(context.Background(), fid)
if err != nil {
s.failed++
println("!!!! ", fid, " location not found!!!!!")

@@ -41,7 +41,7 @@ func runCompact(cmd *Command, args []string) bool {
preallocate := *compactVolumePreallocate * (1 << 20)
vid := needle.VolumeId(*compactVolumeId)
v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0, 0)
v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, needle.GetCurrentVersion(), 0, 0)
if err != nil {
glog.Fatalf("Load Volume [ERROR] %s\n", err)
}

@@ -5,6 +5,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net"
"net/http"
"os"
@@ -329,7 +330,7 @@ func (fo *FilerOptions) startFiler() {
if *fo.publicPort != 0 {
publicListeningAddress := util.JoinHostPort(*fo.bindIp, *fo.publicPort)
glog.V(0).Infoln("Start Seaweed filer server", util.Version(), "public at", publicListeningAddress)
glog.V(0).Infoln("Start Seaweed filer server", version.Version(), "public at", publicListeningAddress)
publicListener, localPublicListener, e := util.NewIpAndLocalListeners(*fo.bindIp, *fo.publicPort, 0)
if e != nil {
glog.Fatalf("Filer server public listener error on port %d:%v", *fo.publicPort, e)
@@ -348,7 +349,7 @@
}
}
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", util.Version(), *fo.ip, *fo.port)
glog.V(0).Infof("Start Seaweed Filer %s at %s:%d", version.Version(), *fo.ip, *fo.port)
filerListener, filerLocalListener, e := util.NewIpAndLocalListeners(
*fo.bindIp, *fo.port,
time.Duration(10)*time.Second,

@@ -28,9 +28,9 @@ type FilerCatOptions struct {
}
func (fco *FilerCatOptions) GetLookupFileIdFunction() wdclient.LookupFileIdFunctionType {
return func(fileId string) (targetUrls []string, err error) {
return func(ctx context.Context, fileId string) (targetUrls []string, err error) {
vid := filer.VolumeId(fileId)
resp, err := fco.filerClient.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
resp, err := fco.filerClient.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {

@@ -193,6 +193,13 @@ func runFuse(cmd *Command, args []string) bool {
} else {
panic(fmt.Errorf("readOnly: %s", err))
}
case "disableXAttr":
if parsed, err := strconv.ParseBool(parameter.value); err == nil {
mountOptions.disableXAttr = &parsed
} else {
panic(fmt.Errorf("disableXAttr: %s", err))
}
case "cpuprofile":
mountCpuProfile = &parameter.value
case "memprofile":

@@ -3,6 +3,7 @@ package command
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net/http"
"time"
@@ -89,7 +90,7 @@ func (iamopt *IamOptions) startIamServer() bool {
glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err)
}
glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port)
glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", version.Version(), *iamopt.port)
if iamApiLocalListener != nil {
go func() {
if err = httpS.Serve(iamApiLocalListener); err != nil {

@@ -9,6 +9,8 @@ import (
"strings"
"time"
"github.com/seaweedfs/seaweedfs/weed/util/version"
hashicorpRaft "github.com/hashicorp/raft"
"slices"
@@ -59,6 +61,8 @@ type MasterOptions struct {
electionTimeout *time.Duration
raftHashicorp *bool
raftBootstrap *bool
telemetryUrl *string
telemetryEnabled *bool
}
func init() {
@ -86,6 +90,8 @@ func init() {
m.electionTimeout = cmdMaster.Flag.Duration("electionTimeout", 10*time.Second, "election timeout of master servers")
m.raftHashicorp = cmdMaster.Flag.Bool("raftHashicorp", false, "use hashicorp raft")
m.raftBootstrap = cmdMaster.Flag.Bool("raftBootstrap", false, "Whether to bootstrap the Raft cluster")
m.telemetryUrl = cmdMaster.Flag.String("telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics")
m.telemetryEnabled = cmdMaster.Flag.Bool("telemetry", false, "enable telemetry reporting")
}
var cmdMaster = &Command{
@ -110,6 +116,11 @@ func runMaster(cmd *Command, args []string) bool {
util.LoadSecurityConfiguration()
util.LoadConfiguration("master", false)
// bind viper configuration to command line flags
if v := util.GetViper().GetString("master.mdir"); v != "" {
*m.metaFolder = v
}
grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
parent, _ := util.FullPath(*m.metaFolder).DirAndName()
@ -160,7 +171,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, masterOption.toMasterOption(masterWhiteList), masterPeers)
listeningAddress := util.JoinHostPort(*masterOption.ipBind, *masterOption.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
glog.V(0).Infof("Start Seaweed Master %s at %s", version.Version(), listeningAddress)
masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOption.ipBind, *masterOption.port, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
@ -211,7 +222,7 @@ func startMaster(masterOption MasterOptions, masterWhiteList []string) {
protobuf.RegisterRaftServer(grpcS, raftServer)
}
reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOption.ipBind, grpcPort)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", version.Version(), *masterOption.ipBind, grpcPort)
if grpcLocalL != nil {
go grpcS.Serve(grpcLocalL)
}
@ -325,5 +336,7 @@ func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOp
DisableHttp: *m.disableHttp,
MetricsAddress: *m.metricsAddress,
MetricsIntervalSec: *m.metricsIntervalSec,
TelemetryUrl: *m.telemetryUrl,
TelemetryEnabled: *m.telemetryEnabled,
}
}

View file
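Two behavioral notes on the master changes above. First, telemetry is strictly opt-in: -telemetry defaults to false and -telemetry.url defaults to the hosted collector, so starting the master with something like weed master -telemetry is what enables reporting. Second, the new viper binding runs after flag parsing, so a non-empty master.mdir value in master.toml takes precedence over the -mdir flag; restating the bound snippet with comments:

// util.GetViper() holds the configuration that
// util.LoadConfiguration("master", false) loaded from master.toml
if v := util.GetViper().GetString("master.mdir"); v != "" {
	*m.metaFolder = v // a non-empty master.toml value overrides the flag
}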

@ -3,6 +3,7 @@ package command
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net/http"
"time"
@ -119,7 +120,7 @@ func startMasterFollower(masterOptions MasterOptions) {
r := mux.NewRouter()
ms := weed_server.NewMasterServer(r, option, masters)
listeningAddress := util.JoinHostPort(*masterOptions.ipBind, *masterOptions.port)
glog.V(0).Infof("Start Seaweed Master %s at %s", util.Version(), listeningAddress)
glog.V(0).Infof("Start Seaweed Master %s at %s", version.Version(), listeningAddress)
masterListener, masterLocalListener, e := util.NewIpAndLocalListeners(*masterOptions.ipBind, *masterOptions.port, 0)
if e != nil {
glog.Fatalf("Master startup error: %v", e)
@ -134,7 +135,7 @@ func startMasterFollower(masterOptions MasterOptions) {
grpcS := pb.NewGrpcServer(security.LoadServerTLS(util.GetViper(), "grpc.master"))
master_pb.RegisterSeaweedServer(grpcS, ms)
reflection.Register(grpcS)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", util.Version(), *masterOptions.ip, grpcPort)
glog.V(0).Infof("Start Seaweed Master %s grpc server at %s:%d", version.Version(), *masterOptions.ip, grpcPort)
if grpcLocalL != nil {
go grpcS.Serve(grpcLocalL)
}

View file

@ -0,0 +1,5 @@
package command
func checkMountPointAvailable(dir string) bool {
return true
}

View file

@ -1,5 +1,5 @@
//go:build !linux && !darwin
// +build !linux,!darwin
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd
package command

View file
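The stub above and the implementation below form a matched pair of build-constrained files: the real mount code carries the positive constraint and the stub carries its exact negation, so exactly one of the two compiles on any platform; adding freebsd to both sides is what extends mount support to FreeBSD. Both the modern //go:build line and the legacy // +build line are kept so older Go toolchains resolve the same constraint. A minimal sketch of the pattern (file names are illustrative):

// mount_std.go: compiled on supported platforms
//go:build linux || darwin || freebsd
// +build linux darwin freebsd

// mount_notsupported.go: compiled everywhere else
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd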

@ -1,5 +1,5 @@
//go:build linux || darwin
// +build linux darwin
//go:build linux || darwin || freebsd
// +build linux darwin freebsd
package command
@ -16,6 +16,8 @@ import (
"syscall"
"time"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/hanwen/go-fuse/v2/fuse"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/mount"
@ -290,7 +292,7 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
}
glog.V(0).Infof("mounted %s%s to %v", *option.filer, mountRoot, dir)
glog.V(0).Infof("This is SeaweedFS version %s %s %s", util.Version(), runtime.GOOS, runtime.GOARCH)
glog.V(0).Infof("This is SeaweedFS version %s %s %s", version.Version(), runtime.GOOS, runtime.GOARCH)
server.Serve()

View file

@ -5,6 +5,7 @@ import (
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"io/ioutil"
"net"
"net/http"
@ -339,7 +340,7 @@ func (s3opt *S3Options) startS3Server() bool {
glog.Fatalf("error with tls config: %v", err)
}
if *s3opt.portHttps == 0 {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.port)
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", version.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
go func() {
if err = httpS.ServeTLS(s3ApiLocalListener, "", ""); err != nil {
@ -351,9 +352,9 @@ func (s3opt *S3Options) startS3Server() bool {
glog.Fatalf("S3 API Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", util.Version(), *s3opt.portHttps)
glog.V(0).Infof("Start Seaweed S3 API Server %s at https port %d", version.Version(), *s3opt.portHttps)
s3ApiListenerHttps, s3ApiLocalListenerHttps, _ := util.NewIpAndLocalListeners(
*s3opt.bindIp, *s3opt.portHttps, time.Duration(*s3opt.idleTimeout)*time.Second)
*s3opt.bindIp, *s3opt.portHttps, time.Duration(*s3opt.idleTimeout)*time.Second)
if s3ApiLocalListenerHttps != nil {
go func() {
if err = httpS.ServeTLS(s3ApiLocalListenerHttps, "", ""); err != nil {
@ -369,7 +370,7 @@ func (s3opt *S3Options) startS3Server() bool {
}
}
if *s3opt.tlsPrivateKey == "" || *s3opt.portHttps > 0 {
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", util.Version(), *s3opt.port)
glog.V(0).Infof("Start Seaweed S3 API Server %s at http port %d", version.Version(), *s3opt.port)
if s3ApiLocalListener != nil {
go func() {
if err = httpS.Serve(s3ApiLocalListener); err != nil {

View file

@ -116,7 +116,13 @@ connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
upsertQuery = """
INSERT INTO "%[1]s" (dirhash, name, directory, meta)
VALUES($1, $2, $3, $4)
ON CONFLICT (dirhash, name) DO UPDATE SET
directory=EXCLUDED.directory,
meta=EXCLUDED.meta
"""
[postgres2]
enabled = false
@ -141,7 +147,13 @@ connection_max_open = 100
connection_max_lifetime_seconds = 0
# if insert/upsert failing, you can disable upsert or update query syntax to match your RDBMS syntax:
enableUpsert = true
upsertQuery = """UPSERT INTO "%[1]s" (dirhash,name,directory,meta) VALUES($1,$2,$3,$4)"""
upsertQuery = """
INSERT INTO "%[1]s" (dirhash, name, directory, meta)
VALUES($1, $2, $3, $4)
ON CONFLICT (dirhash, name) DO UPDATE SET
directory=EXCLUDED.directory,
meta=EXCLUDED.meta
"""
[cassandra2]
# CREATE TABLE filemeta (

View file
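The filer.toml change above swaps a non-standard UPSERT statement for PostgreSQL's native INSERT ... ON CONFLICT ... DO UPDATE, so the default query now runs on stock PostgreSQL; the ON CONFLICT (dirhash, name) target assumes the filemeta table keeps its stock primary key over those two columns. For context, a hedged sketch of how such a configured query is executed, with %[1]s substituted by the table name (variable names here are hypothetical):

// sketch only: bind parameters follow the column order in the query
query := fmt.Sprintf(upsertQuery, tableName) // %[1]s -> table name
_, err := db.ExecContext(ctx, query, dirHash, name, directory, meta)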

@ -104,6 +104,8 @@ func init() {
masterOptions.raftBootstrap = cmdServer.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
masterOptions.heartbeatInterval = cmdServer.Flag.Duration("master.heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)")
masterOptions.electionTimeout = cmdServer.Flag.Duration("master.electionTimeout", 10*time.Second, "election timeout of master servers")
masterOptions.telemetryUrl = cmdServer.Flag.String("master.telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics")
masterOptions.telemetryEnabled = cmdServer.Flag.Bool("master.telemetry", false, "enable telemetry reporting")
filerOptions.filerGroup = cmdServer.Flag.String("filer.filerGroup", "", "share metadata with other filers in the same filerGroup")
filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")

View file

@ -3,6 +3,7 @@ package command
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net"
"os"
"runtime"
@ -175,7 +176,7 @@ func (sftpOpt *SftpOptions) startSftpServer() bool {
glog.Fatalf("SFTP server listener on %s error: %v", listenAddress, err)
}
glog.V(0).Infof("Start Seaweed SFTP Server %s at %s", util.Version(), listenAddress)
glog.V(0).Infof("Start Seaweed SFTP Server %s at %s", version.Version(), listenAddress)
if sftpLocalListener != nil {
go func() {

View file

@ -10,6 +10,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
swv "github.com/seaweedfs/seaweedfs/weed/util/version"
"io"
"net/http"
"os"
@ -117,7 +118,7 @@ func runUpdate(cmd *Command, args []string) bool {
}
func downloadRelease(ctx context.Context, target string, ver string) (version string, err error) {
currentVersion := util.VERSION_NUMBER
currentVersion := swv.VERSION_NUMBER
rel, err := GitHubLatestRelease(ctx, ver, "seaweedfs", "seaweedfs")
if err != nil {
return "", err

View file

@ -2,9 +2,8 @@ package command
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"runtime"
"github.com/seaweedfs/seaweedfs/weed/util"
)
var cmdVersion = &Command{
@ -19,6 +18,9 @@ func runVersion(cmd *Command, args []string) bool {
cmd.Usage()
}
fmt.Printf("version %s %s %s\n", util.Version(), runtime.GOOS, runtime.GOARCH)
fmt.Printf("version %s %s %s\n", version.Version(), runtime.GOOS, runtime.GOARCH)
println()
println("For enterprise users, please visit https://seaweedfs.com for SeaweedFS Enterprise Edition,")
println("which has a self-healing storage format with better data protection.")
return true
}

View file

@ -2,6 +2,7 @@ package command
import (
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net/http"
httppprof "net/http/pprof"
"os"
@ -351,7 +352,7 @@ func (v VolumeServerOptions) startGrpcService(vs volume_server_pb.VolumeServerSe
func (v VolumeServerOptions) startPublicHttpService(handler http.Handler) httpdown.Server {
publicListeningAddress := util.JoinHostPort(*v.bindIp, *v.publicPort)
glog.V(0).Infoln("Start Seaweed volume server", util.Version(), "public at", publicListeningAddress)
glog.V(0).Infoln("Start Seaweed volume server", version.Version(), "public at", publicListeningAddress)
publicListener, e := util.NewListener(publicListeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)
@ -378,7 +379,7 @@ func (v VolumeServerOptions) startClusterHttpService(handler http.Handler) httpd
}
listeningAddress := util.JoinHostPort(*v.bindIp, *v.port)
glog.V(0).Infof("Start Seaweed volume server %s at %s", util.Version(), listeningAddress)
glog.V(0).Infof("Start Seaweed volume server %s at %s", version.Version(), listeningAddress)
listener, e := util.NewListener(listeningAddress, time.Duration(*v.idleConnectionTimeout)*time.Second)
if e != nil {
glog.Fatalf("Volume server listener error:%v", e)

View file

@ -3,6 +3,7 @@ package command
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"net/http"
"os"
"os/user"
@ -65,7 +66,7 @@ func runWebDav(cmd *Command, args []string) bool {
util.LoadSecurityConfiguration()
listenAddress := fmt.Sprintf("%s:%d", *webDavStandaloneOptions.ipBind, *webDavStandaloneOptions.port)
glog.V(0).Infof("Starting Seaweed WebDav Server %s at %s", util.Version(), listenAddress)
glog.V(0).Infof("Starting Seaweed WebDav Server %s at %s", version.Version(), listenAddress)
return webDavStandaloneOptions.startWebDav()
@ -136,12 +137,12 @@ func (wo *WebDavOption) startWebDav() bool {
}
if *wo.tlsPrivateKey != "" {
glog.V(0).Infof("Start Seaweed WebDav Server %s at https %s", util.Version(), listenAddress)
glog.V(0).Infof("Start Seaweed WebDav Server %s at https %s", version.Version(), listenAddress)
if err = httpS.ServeTLS(webDavListener, *wo.tlsCertificate, *wo.tlsPrivateKey); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}
} else {
glog.V(0).Infof("Start Seaweed WebDav Server %s at http %s", util.Version(), listenAddress)
glog.V(0).Infof("Start Seaweed WebDav Server %s at http %s", version.Version(), listenAddress)
if err = httpS.Serve(webDavListener); err != nil {
glog.Fatalf("WebDav Server Fail to serve: %v", err)
}

View file

@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") {
// now the insert failed possibly due to duplication constraints
sqlInsert = "falls back to update"
glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err)
glog.V(1).InfofCtx(ctx, "insert %s %s: %v", entry.FullPath, sqlInsert, err)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
}
if err != nil {
@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
}
}
glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
glog.V(4).InfofCtx(ctx, "delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
if err != nil {
return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
var name string
var data []byte
if err = rows.Scan(&name, &data); err != nil {
glog.V(0).Infof("scan %s : %v", dirPath, err)
glog.V(0).InfofCtx(ctx, "scan %s : %v", dirPath, err)
return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
}
lastFileName = name
@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
FullPath: util.NewFullPath(string(dirPath), name),
}
if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "scan decode %s : %v", entry.FullPath, err)
return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
}

View file
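This hunk is the first of many below that migrate store logging from glog.Infof/Errorf to context-aware InfofCtx/ErrorfCtx variants, letting per-request information carried in the context accompany each log line. The actual implementation lives in weed/glog and is not part of this diff; purely as illustration, a context-aware logging shim can look like this (the "request_id" key is hypothetical):

// sketch only: NOT the real weed/glog code
func ErrorfCtx(ctx context.Context, format string, args ...interface{}) {
	if id, ok := ctx.Value("request_id").(string); ok && id != "" {
		format = "reqid:" + id + " " + format
	}
	Errorf(format, args...)
}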

@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
}
// now the insert failed possibly due to duplication constraints
glog.V(1).Infof("kv insert falls back to update: %s", err)
glog.V(1).InfofCtx(ctx, "kv insert falls back to update: %s", err)
res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
if err != nil {

View file

@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat
if driver.IsNotFound(err) {
return nil, filer_pb.ErrNotFound
}
glog.Errorf("find %s: %v", fullpath, err)
glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
if len(data.Meta) == 0 {
@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP
}
_, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
if err != nil && !driver.IsNotFound(err) {
glog.Errorf("find %s: %v", fullpath, err)
glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
return fmt.Errorf("delete %s : %v", fullpath, err)
}
return nil
@ -331,7 +331,7 @@ sort d.name asc
converted := arrayToBytes(data.Meta)
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}

View file

@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
return nil, filer.ErrKvNotFound
}
if err != nil {
glog.Errorf("kv get: %s %v", string(key), err)
glog.ErrorfCtx(ctx, "kv get: %s %v", string(key), err)
return nil, filer.ErrKvNotFound
}
return arrayToBytes(model.Meta), nil
@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
_, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
if err != nil {
glog.Errorf("kv del: %v", err)
glog.ErrorfCtx(ctx, "kv del: %v", err)
return filer.ErrKvNotFound
}
return nil

View file

@ -4,9 +4,10 @@ import (
"context"
"errors"
"fmt"
"github.com/gocql/gocql"
"time"
"github.com/gocql/gocql"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -202,7 +203,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
@ -210,7 +211,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
}
}
if err = iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
}
return lastFileName, err

View file

@ -4,9 +4,10 @@ import (
"context"
"errors"
"fmt"
"github.com/gocql/gocql"
"time"
"github.com/gocql/gocql"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@ -202,7 +203,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
lastFileName = name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {
@ -210,7 +211,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
}
}
if err = iter.Close(); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
}
return lastFileName, err

View file

@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
}
value, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry marshal %v", err)
}
_, err = store.client.Index().
@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
BodyJson(string(value)).
Do(ctx)
if err != nil {
glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
return fmt.Errorf("insert entry %v", err)
}
return nil
@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
err := jsoniter.Unmarshal(searchResult.Source, esEntry)
return esEntry.Entry, err
}
glog.Errorf("find entry(%s),%v.", string(fullpath), err)
glog.ErrorfCtx(ctx, "find entry(%s),%v.", string(fullpath), err)
return nil, filer_pb.ErrNotFound
}
@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
return nil
}
glog.Errorf("delete index(%s) %v.", index, err)
glog.ErrorfCtx(ctx, "delete index(%s) %v.", index, err)
return err
}
@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
return nil
}
}
glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err)
return fmt.Errorf("delete entry %v", err)
}
func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
_, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
glog.ErrorfCtx(ctx, "elastic delete %s: %v.", entry.FullPath, err)
return false
}
return true
@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
result := &elastic.SearchResult{}
if (startFileName == "" && first) || inclusive {
if result, err = store.search(ctx, index, parentId); err != nil {
glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
glog.ErrorfCtx(ctx, "search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return
}
} else {
@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries(
}
after := weed_util.Md5String([]byte(fullPath))
if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
glog.ErrorfCtx(ctx, "searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
return
}
}

View file

@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
return nil
}
}
glog.Errorf("delete key(id:%s) %v.", string(key), err)
glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err)
return fmt.Errorf("delete key %v", err)
}
@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
return esEntry.Value, nil
}
}
glog.Errorf("find key(%s),%v.", string(key), err)
glog.ErrorfCtx(ctx, "find key(%s),%v.", string(key), err)
return value, filer.ErrKvNotFound
}
@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
esEntry := &ESKVEntry{value}
val, err := jsoniter.Marshal(esEntry)
if err != nil {
glog.Errorf("insert key(%s) %v.", string(key), err)
glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err)
return fmt.Errorf("insert key %v", err)
}
_, err = store.client.Index().

View file

@ -4,10 +4,11 @@ import (
"context"
"crypto/tls"
"fmt"
"go.etcd.io/etcd/client/pkg/v3/transport"
"strings"
"time"
"go.etcd.io/etcd/client/pkg/v3/transport"
"go.etcd.io/etcd/client/v3"
"github.com/seaweedfs/seaweedfs/weed/filer"
@ -95,7 +96,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t
return fmt.Errorf("error checking etcd connection: %s", err)
}
glog.V(0).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
glog.V(0).InfofCtx(ctx, "сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
store.client = client
return nil
@ -208,7 +209,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

View file

@ -1,6 +1,7 @@
package filer
import (
"context"
"io"
"sync"
@ -89,7 +90,7 @@ func (group *ChunkGroup) SetChunks(chunks []*filer_pb.FileChunk) error {
continue
}
resolvedChunks, err := ResolveOneChunkManifest(group.lookupFn, chunk)
resolvedChunks, err := ResolveOneChunkManifest(context.Background(), group.lookupFn, chunk)
if err != nil {
return err
}

View file

@ -2,6 +2,7 @@ package filer
import (
"bytes"
"context"
"fmt"
"io"
"math"
@ -48,7 +49,7 @@ func SeparateManifestChunks(chunks []*filer_pb.FileChunk) (manifestChunks, nonMa
return
}
func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
func ResolveChunkManifest(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset, stopOffset int64) (dataChunks, manifestChunks []*filer_pb.FileChunk, manifestResolveErr error) {
// TODO maybe parallel this
for _, chunk := range chunks {
@ -61,14 +62,14 @@ func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chun
continue
}
resolvedChunks, err := ResolveOneChunkManifest(lookupFileIdFn, chunk)
resolvedChunks, err := ResolveOneChunkManifest(ctx, lookupFileIdFn, chunk)
if err != nil {
return dataChunks, nil, err
}
manifestChunks = append(manifestChunks, chunk)
// recursive
subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(lookupFileIdFn, resolvedChunks, startOffset, stopOffset)
subDataChunks, subManifestChunks, subErr := ResolveChunkManifest(ctx, lookupFileIdFn, resolvedChunks, startOffset, stopOffset)
if subErr != nil {
return dataChunks, nil, subErr
}
@ -78,7 +79,7 @@ func ResolveChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chun
return
}
func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunk *filer_pb.FileChunk) (dataChunks []*filer_pb.FileChunk, manifestResolveErr error) {
if !chunk.IsChunkManifest {
return
}
@ -87,7 +88,7 @@ func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, c
bytesBuffer := bytesBufferPool.Get().(*bytes.Buffer)
bytesBuffer.Reset()
defer bytesBufferPool.Put(bytesBuffer)
err := fetchWholeChunk(bytesBuffer, lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed)
err := fetchWholeChunk(ctx, bytesBuffer, lookupFileIdFn, chunk.GetFileIdString(), chunk.CipherKey, chunk.IsCompressed)
if err != nil {
return nil, fmt.Errorf("fail to read manifest %s: %v", chunk.GetFileIdString(), err)
}
@ -102,13 +103,13 @@ func ResolveOneChunkManifest(lookupFileIdFn wdclient.LookupFileIdFunctionType, c
}
// TODO fetch from cache for weed mount?
func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
urlStrings, err := lookupFileIdFn(fileId)
func fetchWholeChunk(ctx context.Context, bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
urlStrings, err := lookupFileIdFn(ctx, fileId)
if err != nil {
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
glog.ErrorfCtx(ctx, "operation LookupFileId %s failed, err: %v", fileId, err)
return err
}
err = retriedStreamFetchChunkData(bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)
err = retriedStreamFetchChunkData(ctx, bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)
if err != nil {
return err
}
@ -116,15 +117,15 @@ func fetchWholeChunk(bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFi
}
func fetchChunkRange(buffer []byte, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool, offset int64) (int, error) {
urlStrings, err := lookupFileIdFn(fileId)
urlStrings, err := lookupFileIdFn(context.Background(), fileId)
if err != nil {
glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
return 0, err
}
return util_http.RetriedFetchChunkData(buffer, urlStrings, cipherKey, isGzipped, false, offset)
return util_http.RetriedFetchChunkData(context.Background(), buffer, urlStrings, cipherKey, isGzipped, false, offset)
}
func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrings []string, jwt string, cipherKey []byte, isGzipped bool, isFullChunk bool, offset int64, size int) (err error) {
var shouldRetry bool
var totalWritten int
@ -135,7 +136,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
retriedCnt++
var localProcessed int
var writeErr error
shouldRetry, err = util_http.ReadUrlAsStreamAuthenticated(urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
shouldRetry, err = util_http.ReadUrlAsStreamAuthenticated(ctx, urlString+"?readDeleted=true", jwt, cipherKey, isGzipped, isFullChunk, offset, size, func(data []byte) {
if totalWritten > localProcessed {
toBeSkipped := totalWritten - localProcessed
if len(data) <= toBeSkipped {
@ -158,7 +159,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
break
}
if err != nil {
glog.V(0).Infof("read %s failed, err: %v", urlString, err)
glog.V(0).InfofCtx(ctx, "read %s failed, err: %v", urlString, err)
} else {
break
}
@ -168,7 +169,7 @@ func retriedStreamFetchChunkData(writer io.Writer, urlStrings []string, jwt stri
break
}
if err != nil && shouldRetry {
glog.V(0).Infof("retry reading in %v", waitTime)
glog.V(0).InfofCtx(ctx, "retry reading in %v", waitTime)
time.Sleep(waitTime)
} else {
break

View file
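The hunk above threads a context.Context through the whole manifest-resolution path, from ResolveChunkManifest down to the retried streaming fetch, so a cancelled request stops both the lookups and the reads; note that fetchChunkRange still falls back to context.Background() rather than a caller-supplied context. A hedged usage sketch, assuming only the signatures visible in this diff:

ctx := context.Background()
dataChunks, manifestChunks, err := ResolveChunkManifest(ctx, lookupFileIdFn, entry.GetChunks(), 0, math.MaxInt64)
if err != nil {
	// a failed manifest read surfaces here ("fail to read manifest ...")
}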

@ -2,6 +2,7 @@ package filer
import (
"bytes"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/wdclient"
"math"
@ -61,9 +62,9 @@ func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5Digests, nil)), len(chunks))
}
func CompactFileChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
func CompactFileChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk) (compacted, garbage []*filer_pb.FileChunk) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, 0, math.MaxInt64)
visibles, _ := NonOverlappingVisibleIntervals(ctx, lookupFileIdFn, chunks, 0, math.MaxInt64)
compacted, garbage = SeparateGarbageChunks(visibles, chunks)
@ -98,13 +99,13 @@ func FindGarbageChunks(visibles *IntervalList[*VisibleInterval], start int64, st
return
}
func MinusChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
func MinusChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, as, bs []*filer_pb.FileChunk) (delta []*filer_pb.FileChunk, err error) {
aData, aMeta, aErr := ResolveChunkManifest(lookupFileIdFn, as, 0, math.MaxInt64)
aData, aMeta, aErr := ResolveChunkManifest(ctx, lookupFileIdFn, as, 0, math.MaxInt64)
if aErr != nil {
return nil, aErr
}
bData, bMeta, bErr := ResolveChunkManifest(lookupFileIdFn, bs, 0, math.MaxInt64)
bData, bMeta, bErr := ResolveChunkManifest(ctx, lookupFileIdFn, bs, 0, math.MaxInt64)
if bErr != nil {
return nil, bErr
}
@ -180,9 +181,9 @@ func (cv *ChunkView) IsFullChunk() bool {
return cv.ViewSize == cv.ChunkSize
}
func ViewFromChunks(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) {
func ViewFromChunks(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, offset int64, size int64) (chunkViews *IntervalList[*ChunkView]) {
visibles, _ := NonOverlappingVisibleIntervals(lookupFileIdFn, chunks, offset, offset+size)
visibles, _ := NonOverlappingVisibleIntervals(ctx, lookupFileIdFn, chunks, offset, offset+size)
return ViewFromVisibleIntervals(visibles, offset, size)
@ -264,9 +265,9 @@ func MergeIntoChunkViews(chunkViews *IntervalList[*ChunkView], start int64, stop
// NonOverlappingVisibleIntervals translates the file chunk into VisibleInterval in memory
// If the file chunk content is a chunk manifest
func NonOverlappingVisibleIntervals(lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval], err error) {
func NonOverlappingVisibleIntervals(ctx context.Context, lookupFileIdFn wdclient.LookupFileIdFunctionType, chunks []*filer_pb.FileChunk, startOffset int64, stopOffset int64) (visibles *IntervalList[*VisibleInterval], err error) {
chunks, _, err = ResolveChunkManifest(lookupFileIdFn, chunks, startOffset, stopOffset)
chunks, _, err = ResolveChunkManifest(ctx, lookupFileIdFn, chunks, startOffset, stopOffset)
if err != nil {
return
}

View file
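CompactFileChunks, MinusChunks, ViewFromChunks, and NonOverlappingVisibleIntervals all gain the same leading context parameter above; the test updates below simply pass context.Background(), as in this sketch matching the new signature:

compacted, garbage := CompactFileChunks(context.Background(), lookupFileIdFn, entry.GetChunks())
// compacted: chunks still visible after resolving overlaps
// garbage: chunks fully shadowed by newer writes, safe to delete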

@ -1,6 +1,7 @@
package filer
import (
"context"
"github.com/stretchr/testify/assert"
"log"
"slices"
@ -65,7 +66,7 @@ func TestCompactFileChunksRealCase(t *testing.T) {
printChunks("before", chunks)
compacted, garbage := CompactFileChunks(nil, chunks)
compacted, garbage := CompactFileChunks(context.Background(), nil, chunks)
printChunks("compacted", compacted)
printChunks("garbage", garbage)

View file

@ -1,6 +1,7 @@
package filer
import (
"context"
"fmt"
"log"
"math"
@ -21,7 +22,7 @@ func TestCompactFileChunks(t *testing.T) {
{Offset: 110, Size: 200, FileId: "jkl", ModifiedTsNs: 300},
}
compacted, garbage := CompactFileChunks(nil, chunks)
compacted, garbage := CompactFileChunks(context.Background(), nil, chunks)
if len(compacted) != 3 {
t.Fatalf("unexpected compacted: %d", len(compacted))
@ -54,7 +55,7 @@ func TestCompactFileChunks2(t *testing.T) {
})
}
compacted, garbage := CompactFileChunks(nil, chunks)
compacted, garbage := CompactFileChunks(context.Background(), nil, chunks)
if len(compacted) != 4 {
t.Fatalf("unexpected compacted: %d", len(compacted))
@ -90,7 +91,7 @@ func TestRandomFileChunksCompact(t *testing.T) {
}
}
visibles, _ := NonOverlappingVisibleIntervals(nil, chunks, 0, math.MaxInt64)
visibles, _ := NonOverlappingVisibleIntervals(context.Background(), nil, chunks, 0, math.MaxInt64)
for visible := visibles.Front(); visible != nil; visible = visible.Next {
v := visible.Value
@ -228,7 +229,7 @@ func TestIntervalMerging(t *testing.T) {
for i, testcase := range testcases {
log.Printf("++++++++++ merged test case %d ++++++++++++++++++++", i)
intervals, _ := NonOverlappingVisibleIntervals(nil, testcase.Chunks, 0, math.MaxInt64)
intervals, _ := NonOverlappingVisibleIntervals(context.Background(), nil, testcase.Chunks, 0, math.MaxInt64)
x := -1
for visible := intervals.Front(); visible != nil; visible = visible.Next {
x++
@ -426,7 +427,7 @@ func TestChunksReading(t *testing.T) {
// continue
}
log.Printf("++++++++++ read test case %d ++++++++++++++++++++", i)
chunks := ViewFromChunks(nil, testcase.Chunks, testcase.Offset, testcase.Size)
chunks := ViewFromChunks(context.Background(), nil, testcase.Chunks, testcase.Offset, testcase.Size)
x := -1
for c := chunks.Front(); c != nil; c = c.Next {
x++
@ -473,7 +474,7 @@ func BenchmarkCompactFileChunks(b *testing.B) {
}
for n := 0; n < b.N; n++ {
CompactFileChunks(nil, chunks)
CompactFileChunks(context.Background(), nil, chunks)
}
}
@ -562,7 +563,7 @@ func TestCompactFileChunks3(t *testing.T) {
{Offset: 300, Size: 100, FileId: "def", ModifiedTsNs: 200},
}
compacted, _ := CompactFileChunks(nil, chunks)
compacted, _ := CompactFileChunks(context.Background(), nil, chunks)
if len(compacted) != 4 {
t.Fatalf("unexpected compacted: %d", len(compacted))

View file

@ -197,6 +197,10 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
return fmt.Errorf("entry name too long")
}
if entry.IsDirectory() {
entry.Attr.TtlSec = 0
}
oldEntry, _ := f.FindEntry(ctx, entry.FullPath)
/*
@ -216,28 +220,28 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
}
}
glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
glog.V(4).InfofCtx(ctx, "InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
if err := f.Store.InsertEntry(ctx, entry); err != nil {
glog.Errorf("insert entry %s: %v", entry.FullPath, err)
glog.ErrorfCtx(ctx, "insert entry %s: %v", entry.FullPath, err)
return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
}
} else {
if o_excl {
glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
glog.V(3).InfofCtx(ctx, "EEXIST: entry %s already exists", entry.FullPath)
return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
}
glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
glog.V(4).InfofCtx(ctx, "UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
glog.Errorf("update entry %s: %v", entry.FullPath, err)
glog.ErrorfCtx(ctx, "update entry %s: %v", entry.FullPath, err)
return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
}
}
f.NotifyUpdateEvent(ctx, oldEntry, entry, true, isFromOtherCluster, signatures)
f.deleteChunksIfNotNew(oldEntry, entry)
f.deleteChunksIfNotNew(ctx, oldEntry, entry)
glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
glog.V(4).InfofCtx(ctx, "CreateEntry %s: created", entry.FullPath)
return nil
}
@ -252,7 +256,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
// fmt.Printf("%d dirPath: %+v\n", level, dirPath)
// check the store directly
glog.V(4).Infof("find uncached directory: %s", dirPath)
glog.V(4).InfofCtx(ctx, "find uncached directory: %s", dirPath)
dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
// no such existing directory
@ -287,11 +291,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
},
}
glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
glog.V(2).InfofCtx(ctx, "create directory: %s %v", dirPath, dirEntry.Mode)
mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
if mkdirErr != nil {
if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil {
glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
glog.V(3).InfofCtx(ctx, "mkdir %s: %v", dirPath, mkdirErr)
return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
}
} else {
@ -301,7 +305,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
}
} else if !dirEntry.IsDirectory() {
glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
glog.ErrorfCtx(ctx, "CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
return fmt.Errorf("%s is a file", dirPath)
}
@ -312,11 +316,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
if oldEntry != nil {
entry.Attr.Crtime = oldEntry.Attr.Crtime
if oldEntry.IsDirectory() && !entry.IsDirectory() {
glog.Errorf("existing %s is a directory", oldEntry.FullPath)
glog.ErrorfCtx(ctx, "existing %s is a directory", oldEntry.FullPath)
return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
}
if !oldEntry.IsDirectory() && entry.IsDirectory() {
glog.Errorf("existing %s is a file", oldEntry.FullPath)
glog.ErrorfCtx(ctx, "existing %s is a file", oldEntry.FullPath)
return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
}
}

View file

@ -36,12 +36,12 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
// A case not handled:
// what if the chunk is in a different collection?
if shouldDeleteChunks {
f.maybeDeleteHardLinks(hardLinkIds)
f.maybeDeleteHardLinks(ctx, hardLinkIds)
}
return nil
})
if err != nil {
glog.V(2).Infof("delete directory %s: %v", p, err)
glog.V(2).InfofCtx(ctx, "delete directory %s: %v", p, err)
return fmt.Errorf("delete directory %s: %v", p, err)
}
}
@ -53,7 +53,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
}
if shouldDeleteChunks && !isDeleteCollection {
f.DeleteChunks(p, entry.GetChunks())
f.DeleteChunks(ctx, p, entry.GetChunks())
}
if isDeleteCollection {
@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
for {
entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
if err != nil {
glog.Errorf("list folder %s: %v", entry.FullPath, err)
glog.ErrorfCtx(ctx, "list folder %s: %v", entry.FullPath, err)
return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
}
if lastFileName == "" && !isRecursive && len(entries) > 0 {
// only for first iteration in the loop
glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
glog.V(2).InfofCtx(ctx, "deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
}
@ -110,21 +110,21 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
}
}
glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
}
f.NotifyUpdateEvent(ctx, entry, nil, shouldDeleteChunks, isFromOtherCluster, signatures)
f.DeleteChunks(entry.FullPath, chunksToDelete)
f.DeleteChunks(ctx, entry.FullPath, chunksToDelete)
return nil
}
func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@ -150,10 +150,10 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
}
func (f *Filer) maybeDeleteHardLinks(hardLinkIds []HardLinkId) {
func (f *Filer) maybeDeleteHardLinks(ctx context.Context, hardLinkIds []HardLinkId) {
for _, hardLinkId := range hardLinkIds {
if err := f.Store.DeleteHardLink(context.Background(), hardLinkId); err != nil {
glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
if err := f.Store.DeleteHardLink(ctx, hardLinkId); err != nil {
glog.ErrorfCtx(ctx, "delete hard link id %d : %v", hardLinkId, err)
}
}
}

View file

@ -1,6 +1,7 @@
package filer
import (
"context"
"strings"
"time"
@ -72,27 +73,27 @@ func (f *Filer) loopProcessingDeletion() {
}
}
func (f *Filer) DeleteUncommittedChunks(chunks []*filer_pb.FileChunk) {
f.doDeleteChunks(chunks)
func (f *Filer) DeleteUncommittedChunks(ctx context.Context, chunks []*filer_pb.FileChunk) {
f.doDeleteChunks(ctx, chunks)
}
func (f *Filer) DeleteChunks(fullpath util.FullPath, chunks []*filer_pb.FileChunk) {
func (f *Filer) DeleteChunks(ctx context.Context, fullpath util.FullPath, chunks []*filer_pb.FileChunk) {
rule := f.FilerConf.MatchStorageRule(string(fullpath))
if rule.DisableChunkDeletion {
return
}
f.doDeleteChunks(chunks)
f.doDeleteChunks(ctx, chunks)
}
func (f *Filer) doDeleteChunks(chunks []*filer_pb.FileChunk) {
func (f *Filer) doDeleteChunks(ctx context.Context, chunks []*filer_pb.FileChunk) {
for _, chunk := range chunks {
if !chunk.IsChunkManifest {
f.fileIdDeletionQueue.EnQueue(chunk.GetFileIdString())
continue
}
dataChunks, manifestResolveErr := ResolveOneChunkManifest(f.MasterClient.LookupFileId, chunk)
dataChunks, manifestResolveErr := ResolveOneChunkManifest(ctx, f.MasterClient.LookupFileId, chunk)
if manifestResolveErr != nil {
glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
glog.V(0).InfofCtx(ctx, "failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
}
for _, dChunk := range dataChunks {
f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
@ -107,7 +108,7 @@ func (f *Filer) DeleteChunksNotRecursive(chunks []*filer_pb.FileChunk) {
}
}
func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
func (f *Filer) deleteChunksIfNotNew(ctx context.Context, oldEntry, newEntry *Entry) {
var oldChunks, newChunks []*filer_pb.FileChunk
if oldEntry != nil {
oldChunks = oldEntry.GetChunks()
@ -116,9 +117,9 @@ func (f *Filer) deleteChunksIfNotNew(oldEntry, newEntry *Entry) {
newChunks = newEntry.GetChunks()
}
toDelete, err := MinusChunks(f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
toDelete, err := MinusChunks(ctx, f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
if err != nil {
glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
glog.ErrorfCtx(ctx, "Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
return
}
f.DeleteChunksNotRecursive(toDelete)

View file

@ -58,7 +58,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
WritableVolumeCount: rule.VolumeGrowthCount,
}
assignResult, err := operation.Assign(f.GetMaster, f.GrpcDialOption, assignRequest)
assignResult, err := operation.Assign(context.Background(), f.GetMaster, f.GrpcDialOption, assignRequest)
if err != nil {
return nil, nil, fmt.Errorf("AssignVolume: %v", err)
}
@ -83,7 +83,7 @@ func (f *Filer) assignAndUpload(targetFile string, data []byte) (*operation.Assi
return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
}
uploadResult, err := uploader.UploadData(data, uploadOption)
uploadResult, err := uploader.UploadData(context.Background(), data, uploadOption)
if err != nil {
return nil, nil, fmt.Errorf("upload data %s: %v", targetUrl, err)
}

View file

@ -323,7 +323,7 @@ type LogFileIterator struct {
func newLogFileIterator(masterClient *wdclient.MasterClient, fileEntry *Entry, startTsNs, stopTsNs int64) *LogFileIterator {
return &LogFileIterator{
r: NewChunkStreamReaderFromFiler(masterClient, fileEntry.Chunks),
r: NewChunkStreamReaderFromFiler(context.Background(), masterClient, fileEntry.Chunks),
sizeBuf: make([]byte, 4),
startTsNs: startTsNs,
stopTsNs: stopTsNs,

View file

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)
@ -31,7 +32,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry
// remove old hard link
if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 {
glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
glog.V(4).InfofCtx(ctx, "handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
@ -50,7 +51,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err
return encodeErr
}
glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
glog.V(4).InfofCtx(ctx, "setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
return fsw.KvPut(ctx, key, newBlob)
}
@ -63,16 +64,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
value, err := fsw.KvGet(ctx, key)
if err != nil {
glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
glog.ErrorfCtx(ctx, "read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
if err = entry.DecodeAttributesAndChunks(value); err != nil {
glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
glog.ErrorfCtx(ctx, "decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
return err
}
glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
glog.V(4).InfofCtx(ctx, "maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
return nil
}
@ -94,7 +95,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
entry.HardLinkCounter--
if entry.HardLinkCounter <= 0 {
glog.V(4).Infof("DeleteHardLink KvDelete %v", key)
glog.V(4).InfofCtx(ctx, "DeleteHardLink KvDelete %v", key)
return fsw.KvDelete(ctx, key)
}
@ -103,7 +104,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
return encodeErr
}
glog.V(4).Infof("DeleteHardLink KvPut %v", key)
glog.V(4).InfofCtx(ctx, "DeleteHardLink KvPut %v", key)
return fsw.KvPut(ctx, key, newBlob)
}

View file

@ -192,7 +192,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
// remove hard link
op := ctx.Value("OP")
if op != "MV" {
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}
@ -215,7 +215,7 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry
// remove hard link
op := ctx.Value("OP")
if op != "MV" {
glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
return err
}

View file

@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
}
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

View file

@ -4,13 +4,14 @@ import (
"bytes"
"context"
"fmt"
"io"
"os"
"github.com/syndtr/goleveldb/leveldb"
leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/opt"
leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
"io"
"os"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
@ -205,7 +206,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

View file

@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

View file

@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
if !eachEntryFunc(entry) {

View file

@ -187,7 +187,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
glog.Errorf("find %s: %v", fullpath, err)
glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
return nil, filer_pb.ErrNotFound
}
@ -234,16 +234,24 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
"directory": string(dirPath),
}
nameQuery := bson.M{}
if len(prefix) > 0 {
where["name"].(bson.M)["$regex"] = "^" + regexp.QuoteMeta(prefix)
nameQuery["$regex"] = "^" + regexp.QuoteMeta(prefix)
}
if includeStartFile {
where["name"].(bson.M)["$gte"] = startFileName
} else {
where["name"].(bson.M)["$gt"] = startFileName
if len(startFileName) > 0 {
if includeStartFile {
nameQuery["$gte"] = startFileName
} else {
nameQuery["$gt"] = startFileName
}
}
if len(nameQuery) > 0 {
where["name"] = nameQuery
}
optLimit := int64(limit)
opts := &options.FindOptions{Limit: &optLimit, Sort: bson.M{"name": 1}}
cur, err := store.connect.Database(store.database).Collection(store.collectionName).Find(ctx, where, opts)
@ -264,7 +272,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
lastFileName = data.Name
if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
err = decodeErr
glog.V(0).Infof("list %s : %v", entry.FullPath, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
break
}
@ -275,7 +283,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
}
if err := cur.Close(ctx); err != nil {
glog.V(0).Infof("list iterator close: %v", err)
glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
}
return lastFileName, err

View file
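The MongoDB listing fix above is more than a logging change: the old code type-asserted where["name"] to bson.M and unconditionally applied a $gt/$gte bound even when startFileName was empty, whereas the new code collects the name conditions in a separate nameQuery and attaches it only when at least one condition exists. A hedged illustration of the filter the new code builds for prefix "ab" with an exclusive start file "abc":

// directory value is hypothetical
where := bson.M{
	"directory": "/buckets/x",
	"name": bson.M{
		"$regex": "^ab", // "^" + regexp.QuoteMeta(prefix)
		"$gt":    "abc", // exclusive startFileName
	},
}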

@ -3,6 +3,7 @@ package mongodb
import (
"context"
"fmt"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/glog"
"go.mongodb.org/mongo-driver/bson"
@ -37,7 +38,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
var where = bson.M{"directory": dir, "name": name}
err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
if err != mongo.ErrNoDocuments && err != nil {
glog.Errorf("kv get: %v", err)
glog.ErrorfCtx(ctx, "kv get: %v", err)
return nil, filer.ErrKvNotFound
}

View file

@ -29,7 +29,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp
vidCache := make(map[string]*filer_pb.Locations)
var vicCacheLock sync.RWMutex
return func(fileId string) (targetUrls []string, err error) {
return func(ctx context.Context, fileId string) (targetUrls []string, err error) {
vid := VolumeId(fileId)
vicCacheLock.RLock()
locations, found := vidCache[vid]
@ -38,7 +38,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp
if !found {
util.Retry("lookup volume "+vid, func() error {
err = filerClient.WithFilerClient(false, func(client filer_pb.SeaweedFilerClient) error {
resp, err := client.LookupVolume(context.Background(), &filer_pb.LookupVolumeRequest{
resp, err := client.LookupVolume(ctx, &filer_pb.LookupVolumeRequest{
VolumeIds: []string{vid},
})
if err != nil {
@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp
locations = resp.LocationsMap[vid]
if locations == nil || len(locations.Locations) == 0 {
glog.V(0).Infof("failed to locate %s", fileId)
glog.V(0).InfofCtx(ctx, "failed to locate %s", fileId)
return fmt.Errorf("failed to locate %s", fileId)
}
vicCacheLock.Lock()

View file
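LookupFn above returns a closure that caches volume locations per volume id (vidCache behind a sync.RWMutex) and now forwards the caller's context into the LookupVolume call on a cache miss. A hedged usage sketch, assuming a ctx in scope; the file id is hypothetical but follows the usual volumeId,needleKey format:

lookup := filer.LookupFn(filerClient)
urls, err := lookup(ctx, "3,01637037d6") // volume "3" is resolved once, then served from cache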

@ -1,6 +1,7 @@
package filer
import (
"context"
"fmt"
"sync"
"sync/atomic"
@ -169,7 +170,7 @@ func (s *SingleChunkCacher) startCaching() {
s.cacheStartedCh <- struct{}{} // means this has been started
urlStrings, err := s.parent.lookupFileIdFn(s.chunkFileId)
urlStrings, err := s.parent.lookupFileIdFn(context.Background(), s.chunkFileId)
if err != nil {
s.err = fmt.Errorf("operation LookupFileId %s failed, err: %v", s.chunkFileId, err)
return
@ -177,7 +178,7 @@ func (s *SingleChunkCacher) startCaching() {
s.data = mem.Allocate(s.chunkSize)
_, s.err = util_http.RetriedFetchChunkData(s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
_, s.err = util_http.RetriedFetchChunkData(context.Background(), s.data, urlStrings, s.cipherKey, s.isGzipped, true, 0)
if s.err != nil {
mem.Free(s.data)
s.data = nil

View file

@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}

View file

@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir
entry, err := store.FindEntry(ctx, path)
lastFileName = fileName
if err != nil {
glog.V(0).Infof("list %s : %v", path, err)
glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
if err == filer_pb.ErrNotFound {
continue
}

Some files were not shown because too many files have changed in this diff.