Mirror of https://github.com/chrislusf/seaweedfs, synced 2025-06-29 16:22:46 +02:00

Compare commits: "master" and "3.90" have no commits in common; the two refs have entirely different histories, so the listing below is effectively a direct diff of the two trees.

181 changed files with 12,210 additions and 10,453 deletions.
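Since there is no merge base, roughly the same summary can be reproduced from a local clone of the mirror; a minimal sketch, assuming both refs are fetched:

```bash
# Tree-to-tree diff between two unrelated refs
git diff --stat master 3.90
```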
.github/workflows/container_dev.yml (vendored): 2 changed lines

@@ -36,7 +36,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
         with:
           buildkitd-flags: "--debug"
       -
.github/workflows/container_latest.yml (vendored): 2 changed lines

@@ -37,7 +37,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
         with:
           buildkitd-flags: "--debug"
       -
.github/workflows/container_release1.yml (vendored): 2 changed lines

@@ -37,7 +37,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
       - name: Login to Docker Hub
         if: github.event_name != 'pull_request'
.github/workflows/container_release2.yml (vendored): 2 changed lines

@@ -38,7 +38,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
       - name: Login to Docker Hub
         if: github.event_name != 'pull_request'
.github/workflows/container_release3.yml (vendored): 2 changed lines

@@ -38,7 +38,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
       - name: Login to Docker Hub
         if: github.event_name != 'pull_request'
.github/workflows/container_release4.yml (vendored): 2 changed lines

@@ -37,7 +37,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
       - name: Login to Docker Hub
         if: github.event_name != 'pull_request'
.github/workflows/container_release5.yml (vendored): 2 changed lines

@@ -37,7 +37,7 @@ jobs:
         uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
+        uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
       - name: Login to Docker Hub
         if: github.event_name != 'pull_request'
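All seven container workflows make the same one-line change, swapping the pinned commit of docker/setup-buildx-action. A sketch of the pinning pattern these workflows follow (the trailing "# v1" comments are informational tag annotations that must be kept in sync by hand, and here appear to have drifted from whatever release the SHAs actually point at):

```yaml
steps:
  # Pinning to a full commit SHA makes the workflow immune to tag re-pointing;
  # the trailing comment only records which tag the SHA was taken from.
  - name: Set up Docker Buildx
    uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
```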
.github/workflows/deploy_telemetry.yml (vendored): file deleted (171 lines)

@@ -1,171 +0,0 @@ (entire file removed; its former contents follow, with indentation restored)

# This workflow will build and deploy the SeaweedFS telemetry server
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go

name: Deploy Telemetry Server

on:
  workflow_dispatch:
    inputs:
      setup:
        description: 'Run first-time server setup'
        required: true
        type: boolean
        default: false
      deploy:
        description: 'Deploy telemetry server to remote server'
        required: true
        type: boolean
        default: false

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.24'

      - name: Build Telemetry Server
        if: github.event_name == 'workflow_dispatch' && inputs.deploy
        run: |
          go mod tidy
          echo "Building telemetry server..."
          GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go
          ls -la telemetry-server
          echo "Build completed successfully"

      - name: First-time Server Setup
        if: github.event_name == 'workflow_dispatch' && inputs.setup
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
          REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
          REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "Host *" > ~/.ssh/config
          echo " StrictHostKeyChecking no" >> ~/.ssh/config

          # Create all required directories with proper permissions
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \
            chmod 755 ~/seaweedfs-telemetry/logs && \
            chmod 755 ~/seaweedfs-telemetry/data && \
            touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \
            chmod 644 ~/seaweedfs-telemetry/logs/*.log"

          # Create systemd service file
          echo "
          [Unit]
          Description=SeaweedFS Telemetry Server
          After=network.target

          [Service]
          Type=simple
          User=$REMOTE_USER
          WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry
          ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353
          Restart=always
          RestartSec=5
          StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log
          StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log

          [Install]
          WantedBy=multi-user.target" > telemetry.service

          # Setup logrotate configuration
          echo "# SeaweedFS Telemetry service log rotation
          /home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 $REMOTE_USER $REMOTE_USER
              postrotate
                  systemctl restart telemetry.service
              endscript
          }" > telemetry_logrotate

          # Copy configuration files
          scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/

          # Copy and install service and logrotate files
          scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \
            sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \
            sudo systemctl daemon-reload && \
            sudo systemctl enable telemetry.service"

          echo "✅ First-time setup completed successfully!"
          echo "📋 Next step: Run the deployment to install the telemetry server binary"
          echo "   1. Go to GitHub Actions → Deploy Telemetry Server"
          echo "   2. Click 'Run workflow'"
          echo "   3. Check 'Deploy telemetry server to remote server'"
          echo "   4. Click 'Run workflow'"

          rm -f ~/.ssh/deploy_key

      - name: Deploy Telemetry Server to Remote Server
        if: github.event_name == 'workflow_dispatch' && inputs.deploy
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
          REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
          REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "Host *" > ~/.ssh/config
          echo " StrictHostKeyChecking no" >> ~/.ssh/config

          # Create temp directory and copy binary
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp"
          scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/

          # Copy updated configuration files
          scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/

          # Check if service exists and deploy accordingly
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then
              echo 'Service exists, performing update...'
              sudo systemctl stop telemetry.service
              mkdir -p ~/seaweedfs-telemetry/bin
              mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/
              chmod +x ~/seaweedfs-telemetry/bin/telemetry-server
              sudo systemctl start telemetry.service
              sudo systemctl status telemetry.service
            else
              echo 'ERROR: telemetry.service not found!'
              echo 'Please run the first-time setup before deploying.'
              echo 'Go to GitHub Actions → Deploy Telemetry Server → Run workflow → Check \"Run first-time server setup\"'
              exit 1
            fi"

          # Verify deployment
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            echo 'Waiting for service to start...'
            sleep 5
            curl -f http://localhost:8353/health || echo 'Health check failed'"

          rm -f ~/.ssh/deploy_key

      - name: Notify Deployment Status
        if: always()
        run: |
          if [ "${{ job.status }}" == "success" ]; then
            echo "✅ Telemetry server deployment successful"
            echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353"
            echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics"
          else
            echo "❌ Telemetry server deployment failed"
          fi
README.md: Enterprise section removed (2 hunks)

@@ -73,7 +73,6 @@ Table of Contents
 * [Installation Guide](#installation-guide)
 * [Disk Related Topics](#disk-related-topics)
 * [Benchmark](#benchmark)
-* [Enterprise](#enterprise)
 * [License](#license)
 
 # Quick Start #

@@ -652,13 +651,6 @@ Total Errors:0.
 
 [Back to TOC](#table-of-contents)
 
-## Enterprise ##
-
-For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
-which has a self-healing storage format with better data protection.
-
-[Back to TOC](#table-of-contents)
-
 ## License ##
 
 Licensed under the Apache License, Version 2.0 (the "License");
go.mod: 234 changed lines

@@ -5,9 +5,9 @@ go 1.24
 toolchain go1.24.1
 
 require (
-    cloud.google.com/go v0.121.1 // indirect
+    cloud.google.com/go v0.121.0 // indirect
     cloud.google.com/go/pubsub v1.49.0
-    cloud.google.com/go/storage v1.55.0
+    cloud.google.com/go/storage v1.54.0
     github.com/Azure/azure-pipeline-go v0.2.3
     github.com/Azure/azure-storage-blob-go v0.15.0
     github.com/Shopify/sarama v1.38.1

@@ -31,7 +31,7 @@ require (
     github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
     github.com/fsnotify/fsnotify v1.8.0 // indirect
     github.com/go-redsync/redsync/v4 v4.13.0
-    github.com/go-sql-driver/mysql v1.9.3
+    github.com/go-sql-driver/mysql v1.9.2
     github.com/go-zookeeper/zk v1.0.3 // indirect
     github.com/gocql/gocql v1.7.0
     github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect

@@ -69,8 +69,8 @@ require (
     github.com/posener/complete v1.2.3
     github.com/pquerna/cachecontrol v0.2.0
     github.com/prometheus/client_golang v1.22.0
-    github.com/prometheus/client_model v0.6.2 // indirect
-    github.com/prometheus/common v0.64.0 // indirect
+    github.com/prometheus/client_model v0.6.1 // indirect
+    github.com/prometheus/common v0.62.0 // indirect
     github.com/prometheus/procfs v0.16.1
     github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
     github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect

@@ -94,30 +94,30 @@ require (
     github.com/xdg-go/scram v1.1.2 // indirect
     github.com/xdg-go/stringprep v1.0.4 // indirect
     github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
-    go.etcd.io/etcd/client/v3 v3.6.1
-    go.mongodb.org/mongo-driver v1.17.4
+    go.etcd.io/etcd/client/v3 v3.6.0
+    go.mongodb.org/mongo-driver v1.17.3
     go.opencensus.io v0.24.0 // indirect
     gocloud.dev v0.41.0
     gocloud.dev/pubsub/natspubsub v0.41.0
     gocloud.dev/pubsub/rabbitpubsub v0.41.0
-    golang.org/x/crypto v0.39.0
-    golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
-    golang.org/x/image v0.28.0
-    golang.org/x/net v0.41.0
+    golang.org/x/crypto v0.38.0
+    golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
+    golang.org/x/image v0.27.0
+    golang.org/x/net v0.40.0
     golang.org/x/oauth2 v0.30.0 // indirect
     golang.org/x/sys v0.33.0
-    golang.org/x/text v0.26.0 // indirect
-    golang.org/x/tools v0.34.0
+    golang.org/x/text v0.25.0 // indirect
+    golang.org/x/tools v0.33.0
     golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
-    google.golang.org/api v0.238.0
+    google.golang.org/api v0.234.0
     google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
-    google.golang.org/grpc v1.73.0
+    google.golang.org/grpc v1.72.1
     google.golang.org/protobuf v1.36.6
     gopkg.in/inf.v0 v0.9.1 // indirect
     modernc.org/b v1.0.0 // indirect
     modernc.org/mathutil v1.7.1
     modernc.org/memory v1.11.0 // indirect
-    modernc.org/sqlite v1.38.0
+    modernc.org/sqlite v1.37.1
     modernc.org/strutil v1.2.1
 )

@@ -125,133 +125,126 @@ require (
     github.com/Jille/raft-grpc-transport v1.6.1
     github.com/arangodb/go-driver v1.6.6
     github.com/armon/go-metrics v0.4.1
-    github.com/aws/aws-sdk-go-v2 v1.36.5
-    github.com/aws/aws-sdk-go-v2/config v1.29.17
-    github.com/aws/aws-sdk-go-v2/credentials v1.17.70
-    github.com/aws/aws-sdk-go-v2/service/s3 v1.81.0
+    github.com/aws/aws-sdk-go-v2 v1.36.3
+    github.com/aws/aws-sdk-go-v2/config v1.29.14
+    github.com/aws/aws-sdk-go-v2/credentials v1.17.67
+    github.com/aws/aws-sdk-go-v2/service/s3 v1.80.0
     github.com/cognusion/imaging v1.0.2
     github.com/fluent/fluent-logger-golang v1.10.0
-    github.com/getsentry/sentry-go v0.33.0
+    github.com/getsentry/sentry-go v0.31.1
     github.com/golang-jwt/jwt/v5 v5.2.2
     github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
-    github.com/hanwen/go-fuse/v2 v2.8.0
+    github.com/hanwen/go-fuse/v2 v2.7.3-0.20250605191109-50f6569d1a7d
     github.com/hashicorp/raft v1.7.3
     github.com/hashicorp/raft-boltdb/v2 v2.3.1
-    github.com/minio/crc64nvme v1.0.2
+    github.com/minio/crc64nvme v1.0.1
     github.com/orcaman/concurrent-map/v2 v2.0.1
-    github.com/parquet-go/parquet-go v0.25.1
-    github.com/pkg/sftp v1.13.9
+    github.com/parquet-go/parquet-go v0.24.0
+    github.com/pkg/sftp v1.13.7
     github.com/rabbitmq/amqp091-go v1.10.0
-    github.com/rclone/rclone v1.70.1
-    github.com/rdleal/intervalst v1.5.0
-    github.com/redis/go-redis/v9 v9.10.0
+    github.com/rclone/rclone v1.69.3
+    github.com/rdleal/intervalst v1.4.1
+    github.com/redis/go-redis/v9 v9.8.0
     github.com/schollz/progressbar/v3 v3.18.0
     github.com/shirou/gopsutil/v3 v3.24.5
     github.com/tarantool/go-tarantool/v2 v2.3.2
     github.com/tikv/client-go/v2 v2.0.7
     github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
-    github.com/ydb-platform/ydb-go-sdk/v3 v3.111.0
-    go.etcd.io/etcd/client/pkg/v3 v3.6.1
+    github.com/ydb-platform/ydb-go-sdk/v3 v3.108.3
+    go.etcd.io/etcd/client/pkg/v3 v3.6.0
     go.uber.org/atomic v1.11.0
-    golang.org/x/sync v0.15.0
+    golang.org/x/sync v0.14.0
     google.golang.org/grpc/security/advancedtls v1.0.0
 )
 
 require (
-    cel.dev/expr v0.23.0 // indirect
-    cloud.google.com/go/auth v0.16.2 // indirect
+    cel.dev/expr v0.22.1 // indirect
+    cloud.google.com/go/auth v0.16.1 // indirect
     cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
     cloud.google.com/go/compute/metadata v0.7.0 // indirect
     cloud.google.com/go/iam v1.5.2 // indirect
     cloud.google.com/go/monitoring v1.24.2 // indirect
     filippo.io/edwards25519 v1.1.0 // indirect
-    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
-    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect
-    github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
-    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
-    github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.1 // indirect
+    github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 // indirect
+    github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
+    github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+    github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
+    github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0 // indirect
     github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
     github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
-    github.com/Files-com/files-sdk-go/v3 v3.2.173 // indirect
+    github.com/Files-com/files-sdk-go/v3 v3.2.97 // indirect
     github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
     github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
     github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
     github.com/IBM/go-sdk-core/v5 v5.20.0 // indirect
     github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
     github.com/Microsoft/go-winio v0.6.2 // indirect
     github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
     github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
-    github.com/ProtonMail/go-crypto v1.3.0 // indirect
+    github.com/ProtonMail/go-crypto v1.1.3 // indirect
     github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
     github.com/ProtonMail/go-srp v0.0.7 // indirect
-    github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
-    github.com/PuerkitoBio/goquery v1.10.3 // indirect
+    github.com/ProtonMail/gopenpgp/v2 v2.7.4 // indirect
+    github.com/PuerkitoBio/goquery v1.8.1 // indirect
     github.com/abbot/go-http-auth v0.4.0 // indirect
     github.com/andybalholm/brotli v1.1.0 // indirect
-    github.com/andybalholm/cascadia v1.3.3 // indirect
+    github.com/andybalholm/cascadia v1.3.2 // indirect
     github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
     github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
     github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
-    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
-    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.77 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
+    github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
+    github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+    github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
     github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
-    github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
-    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
+    github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
+    github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
     github.com/aws/aws-sdk-go-v2/service/sns v1.34.2 // indirect
     github.com/aws/aws-sdk-go-v2/service/sqs v1.38.3 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
-    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
-    github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
-    github.com/aws/smithy-go v1.22.4 // indirect
+    github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
+    github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
+    github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
+    github.com/aws/smithy-go v1.22.3 // indirect
     github.com/boltdb/bolt v1.3.1 // indirect
-    github.com/bradenaw/juniper v0.15.3 // indirect
+    github.com/bradenaw/juniper v0.15.2 // indirect
     github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
     github.com/buengese/sgzip v0.1.1 // indirect
     github.com/calebcase/tmpfile v1.0.3 // indirect
     github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
-    github.com/cloudflare/circl v1.6.1 // indirect
-    github.com/cloudinary/cloudinary-go/v2 v2.10.0 // indirect
-    github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect
-    github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
+    github.com/cloudflare/circl v1.3.7 // indirect
+    github.com/cloudinary/cloudinary-go/v2 v2.9.0 // indirect
+    github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6 // indirect
     github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
     github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
-    github.com/creasty/defaults v1.8.0 // indirect
+    github.com/creasty/defaults v1.7.0 // indirect
     github.com/cronokirby/saferith v0.33.0 // indirect
     github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
     github.com/d4l3k/messagediff v1.2.1 // indirect
     github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
     github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect
-    github.com/ebitengine/purego v0.8.4 // indirect
+    github.com/ebitengine/purego v0.8.3 // indirect
     github.com/elastic/gosigar v0.14.2 // indirect
-    github.com/emersion/go-message v0.18.2 // indirect
-    github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
+    github.com/emersion/go-message v0.18.0 // indirect
+    github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
+    github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
     github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
     github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
     github.com/fatih/color v1.16.0 // indirect
     github.com/felixge/httpsnoop v1.0.4 // indirect
-    github.com/flynn/noise v1.1.0 // indirect
-    github.com/gabriel-vasile/mimetype v1.4.9 // indirect
-    github.com/geoffgarside/ber v1.2.0 // indirect
-    github.com/go-chi/chi/v5 v5.2.2 // indirect
+    github.com/flynn/noise v1.0.1 // indirect
+    github.com/gabriel-vasile/mimetype v1.4.7 // indirect
+    github.com/geoffgarside/ber v1.1.0 // indirect
+    github.com/go-chi/chi/v5 v5.1.0 // indirect
     github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect
     github.com/go-jose/go-jose/v4 v4.0.5 // indirect
-    github.com/go-logr/logr v1.4.3 // indirect
+    github.com/go-logr/logr v1.4.2 // indirect
     github.com/go-logr/stdr v1.2.2 // indirect
     github.com/go-ole/go-ole v1.3.0 // indirect
     github.com/go-openapi/errors v0.22.1 // indirect
     github.com/go-openapi/strfmt v0.23.0 // indirect
     github.com/go-playground/locales v0.14.1 // indirect
     github.com/go-playground/universal-translator v0.18.1 // indirect
     github.com/go-playground/validator/v10 v10.26.0 // indirect
-    github.com/go-resty/resty/v2 v2.16.5 // indirect
-    github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
-    github.com/gofrs/flock v0.12.1 // indirect
+    github.com/go-resty/resty/v2 v2.11.0 // indirect
+    github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+    github.com/gofrs/flock v0.8.1 // indirect
     github.com/gogo/protobuf v1.3.2 // indirect
     github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
     github.com/google/s2a-go v0.1.9 // indirect

@@ -277,35 +270,33 @@ require (
     github.com/josharian/intern v1.0.0 // indirect
     github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
     github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
-    github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+    github.com/klauspost/cpuid/v2 v2.2.9 // indirect
     github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect
     github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
     github.com/kr/fs v0.1.0 // indirect
     github.com/kylelemons/godebug v1.1.0 // indirect
     github.com/lanrat/extsort v1.0.2 // indirect
     github.com/leodido/go-urn v1.4.0 // indirect
     github.com/lpar/date v1.0.0 // indirect
-    github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
-    github.com/mattn/go-colorable v0.1.14 // indirect
+    github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
+    github.com/mattn/go-colorable v0.1.13 // indirect
     github.com/mattn/go-runewidth v0.0.16 // indirect
     github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
     github.com/mitchellh/go-homedir v1.1.0 // indirect
     github.com/mitchellh/mapstructure v1.5.0 // indirect
     github.com/montanaflynn/stats v0.7.1 // indirect
     github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
     github.com/nats-io/nats.go v1.40.1 // indirect
     github.com/nats-io/nkeys v0.4.10 // indirect
     github.com/nats-io/nuid v1.0.1 // indirect
     github.com/ncruces/go-strftime v0.1.9 // indirect
-    github.com/ncw/swift/v2 v2.0.4 // indirect
+    github.com/ncw/swift/v2 v2.0.3 // indirect
     github.com/nxadm/tail v1.4.11 // indirect
     github.com/oklog/ulid v1.3.1 // indirect
-    github.com/onsi/ginkgo/v2 v2.23.3 // indirect
+    github.com/olekukonko/tablewriter v0.0.5 // indirect
+    github.com/onsi/ginkgo/v2 v2.19.0 // indirect
     github.com/onsi/gomega v1.34.1 // indirect
     github.com/opentracing/opentracing-go v1.2.0 // indirect
-    github.com/oracle/oci-go-sdk/v65 v65.93.0 // indirect
-    github.com/panjf2000/ants/v2 v2.11.3 // indirect
+    github.com/oracle/oci-go-sdk/v65 v65.80.0 // indirect
+    github.com/panjf2000/ants/v2 v2.9.1 // indirect
     github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
-    github.com/pelletier/go-toml/v2 v2.2.4 // indirect
+    github.com/pelletier/go-toml/v2 v2.2.3 // indirect
     github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
     github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
     github.com/pierrec/lz4/v4 v4.1.21 // indirect

@@ -316,31 +307,31 @@ require (
     github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
     github.com/pkg/xattr v0.4.10 // indirect
     github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
-    github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+    github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
     github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
-    github.com/relvacode/iso8601 v1.6.0 // indirect
+    github.com/relvacode/iso8601 v1.3.0 // indirect
     github.com/rfjakob/eme v1.1.2 // indirect
     github.com/rivo/uniseg v0.4.7 // indirect
     github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
     github.com/sagikazarmark/locafero v0.7.0 // indirect
-    github.com/samber/lo v1.50.0 // indirect
-    github.com/shirou/gopsutil/v4 v4.25.5 // indirect
+    github.com/samber/lo v1.47.0 // indirect
+    github.com/shirou/gopsutil/v4 v4.24.12 // indirect
     github.com/shoenig/go-m1cpu v0.1.6 // indirect
     github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
     github.com/smartystreets/goconvey v1.8.1 // indirect
-    github.com/sony/gobreaker v1.0.0 // indirect
+    github.com/sony/gobreaker v0.5.0 // indirect
     github.com/sourcegraph/conc v0.3.0 // indirect
-    github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect
+    github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
     github.com/spf13/pflag v1.0.6 // indirect
     github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
     github.com/subosito/gotenv v1.6.0 // indirect
-    github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect
+    github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002 // indirect
     github.com/tarantool/go-iproto v1.1.0 // indirect
     github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect
     github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 // indirect
     github.com/tinylib/msgp v1.3.0 // indirect
-    github.com/tklauser/go-sysconf v0.3.15 // indirect
-    github.com/tklauser/numcpus v0.10.0 // indirect
+    github.com/tklauser/go-sysconf v0.3.13 // indirect
+    github.com/tklauser/numcpus v0.7.0 // indirect
     github.com/twmb/murmur3 v1.1.3 // indirect
     github.com/unknwon/goconfig v1.0.0 // indirect
     github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect

@@ -352,37 +343,36 @@ require (
     github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
     github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
     github.com/yusufpapurcu/wmi v1.2.4 // indirect
-    github.com/zeebo/blake3 v0.2.4 // indirect
+    github.com/zeebo/blake3 v0.2.3 // indirect
     github.com/zeebo/errs v1.4.0 // indirect
-    go.etcd.io/bbolt v1.4.0 // indirect
-    go.etcd.io/etcd/api/v3 v3.6.1 // indirect
+    go.etcd.io/bbolt v1.3.10 // indirect
+    go.etcd.io/etcd/api/v3 v3.6.0 // indirect
     go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-    go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
-    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
-    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
-    go.opentelemetry.io/otel v1.36.0 // indirect
-    go.opentelemetry.io/otel/metric v1.36.0 // indirect
-    go.opentelemetry.io/otel/sdk v1.36.0 // indirect
-    go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
-    go.opentelemetry.io/otel/trace v1.36.0 // indirect
+    go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+    go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
+    go.opentelemetry.io/otel v1.35.0 // indirect
+    go.opentelemetry.io/otel/metric v1.35.0 // indirect
+    go.opentelemetry.io/otel/sdk v1.35.0 // indirect
+    go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
+    go.opentelemetry.io/otel/trace v1.35.0 // indirect
     go.uber.org/multierr v1.11.0 // indirect
     go.uber.org/zap v1.27.0 // indirect
     golang.org/x/term v0.32.0 // indirect
-    golang.org/x/time v0.12.0 // indirect
-    google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9 // indirect
-    google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
+    golang.org/x/time v0.11.0 // indirect
+    google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect
+    google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect
     gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
     gopkg.in/validator.v2 v2.0.1 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
     gopkg.in/yaml.v3 v3.0.1 // indirect
-    modernc.org/libc v1.65.10 // indirect
+    modernc.org/libc v1.65.7 // indirect
     moul.io/http2curl/v2 v2.3.0 // indirect
     sigs.k8s.io/yaml v1.4.0 // indirect
-    storj.io/common v0.0.0-20250605163628-70ca83b6228e // indirect
-    storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
-    storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
+    storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect
+    storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 // indirect
+    storj.io/eventkit v0.0.0-20240415002644-1d9596fee086 // indirect
     storj.io/infectious v0.0.2 // indirect
-    storj.io/picobuf v0.0.4 // indirect
+    storj.io/picobuf v0.0.3 // indirect
     storj.io/uplink v1.13.1 // indirect
 )
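Every pair above downgrades from master's pinned version to the one shipped in 3.90. Drift like this can be audited from either checkout; a sketch, assuming network access from the module root:

```bash
# Modules with a newer release available are printed as "module vX [vY]"
go list -m -u all | grep '\['
```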
Helm chart: Chart.yaml (2 changed lines)

@@ -1,6 +1,6 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.92"
+appVersion: "3.90"
 # Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
-version: 4.0.392
+version: 4.0.390
Helm chart documentation: Enterprise section removed

@@ -144,8 +144,3 @@ stringData:
   # this key must be an inline json config file
   seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"snu8yoP6QAlY0ne4","secretKey":"PNzBcmeLNEdR0oviwm04NQAicOrDH1Km"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"SCigFee6c5lbi04A","secretKey":"kgFhbT38R8WUYVtiFQ1OiSVOrYr3NKku"}],"actions":["Read"]}]}'
 ```
-
-## Enterprise
-
-For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
-which has a self-healing storage format with better data protection.
Helm chart: filer StatefulSet template (3 hunks)

@@ -162,9 +162,6 @@ spec:
           {{- if .Values.filer.metricsPort }}
           -metricsPort={{ .Values.filer.metricsPort }} \
           {{- end }}
-          {{- if .Values.filer.metricsIp }}
-          -metricsIp={{ .Values.filer.metricsIp }} \
-          {{- end }}
           {{- if .Values.filer.redirectOnRead }}
           -redirectOnRead \
           {{- end }}

@@ -190,7 +187,6 @@ spec:
           -encryptVolumeData \
           {{- end }}
           -ip=${POD_IP} \
-          -ip.bind={{ .Values.filer.ipBind }} \
           {{- if .Values.filer.filerGroup}}
           -filerGroup={{ .Values.filer.filerGroup}} \
           {{- end }}

@@ -223,10 +219,7 @@ spec:
           -s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \
           {{- end }}
           {{- end }}
-          -master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
-          {{- range .Values.filer.extraArgs }}
-          {{ . }} \
-          {{- end }}
+          -master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
           volumeMounts:
           {{- if (or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir")) }}
           - name: seaweedfs-filer-log-volume
Helm chart: master StatefulSet template (1 hunk)

@@ -157,36 +157,18 @@ spec:
           {{- if .Values.master.metricsPort }}
           -metricsPort={{ .Values.master.metricsPort }} \
           {{- end }}
-          {{- if .Values.master.metricsIp }}
-          -metricsIp={{ .Values.master.metricsIp }} \
-          {{- end }}
           -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
           {{- if .Values.master.disableHttp }}
           -disableHttp \
           {{- end }}
-          {{- if .Values.master.resumeState }}
-          -resumeState \
-          {{- end }}
-          {{- if .Values.master.raftHashicorp }}
-          -raftHashicorp \
-          {{- end }}
-          {{- if .Values.master.raftBootstrap }}
-          -raftBootstrap \
-          {{- end }}
-          {{- if .Values.master.electionTimeout }}
-          -electionTimeout={{ .Values.master.electionTimeout }} \
-          {{- end }}
-          {{- if .Values.master.heartbeatInterval }}
-          -heartbeatInterval={{ .Values.master.heartbeatInterval }} \
+          {{- if .Values.master.pulseSeconds }}
+          -pulseSeconds={{ .Values.master.pulseSeconds }} \
           {{- end }}
           {{- if .Values.master.garbageThreshold }}
           -garbageThreshold={{ .Values.master.garbageThreshold }} \
           {{- end }}
           -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master.{{ .Release.Namespace }} \
-          -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} \
-          {{- range .Values.master.extraArgs }}
-          {{ . }} \
-          {{- end }}
+          -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
           volumeMounts:
           - name : data-{{ .Release.Namespace }}
             mountPath: /data
Helm chart: volume StatefulSet template (2 hunks)

@@ -150,9 +150,6 @@ spec:
           {{- if .Values.volume.metricsPort }}
           -metricsPort={{ .Values.volume.metricsPort }} \
           {{- end }}
-          {{- if .Values.volume.metricsIp }}
-          -metricsIp={{ .Values.volume.metricsIp }} \
-          {{- end }}
           -dir {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}/{{$dir.name}}{{end}} \
           {{- if .Values.volume.idx }}
           -dir.idx=/idx \

@@ -186,10 +183,7 @@ spec:
           -minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
           -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume.{{ .Release.Namespace }} \
           -compactionMBps={{ .Values.volume.compactionMBps }} \
-          -mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
-          {{- range .Values.volume.extraArgs }}
-          {{ . }} \
-          {{- end }}
+          -mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
           volumeMounts:
           {{- range $dir := .Values.volume.dataDirs }}
           {{- if not ( eq $dir.type "custom" ) }}
Helm chart: values.yaml (5 hunks)

@@ -56,11 +56,12 @@ master:
   port: 9333
   grpcPort: 19333
   metricsPort: 9327
-  metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
   ipBind: "0.0.0.0"
   volumePreallocate: false
   volumeSizeLimitMB: 1000
   loggingOverrideLevel: null
+  # number of seconds between heartbeats, default 5
+  pulseSeconds: null
   # threshold to vacuum and reclaim spaces, default 0.3 (30%)
   garbageThreshold: null
   # Prometheus push interval in seconds, default 15

@@ -74,25 +75,6 @@ master:
   # Disable http request, only gRpc operations are allowed
   disableHttp: false
-
-  # Resume previous state on start master server
-  resumeState: false
-  # Use Hashicorp Raft
-  raftHashicorp: false
-  # Whether to bootstrap the Raft cluster. Only use it when use Hashicorp Raft
-  raftBootstrap: false
-
-  # election timeout of master servers
-  electionTimeout: "10s"
-  # heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)
-  heartbeatInterval: "300ms"
-
-  # Custom command line arguments to add to the master command
-  # Example to fix IPv6 metrics connectivity issues:
-  # extraArgs: ["-metricsIp", "0.0.0.0"]
-  # Example with multiple args:
-  # extraArgs: ["-customFlag", "value", "-anotherFlag"]
-  extraArgs: []
+
   config: |-
     # Enter any extra configuration for master.toml here.
     # It may be a multi-line string.

@@ -295,7 +277,6 @@ volume:
   port: 8080
   grpcPort: 18080
   metricsPort: 9327
-  metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
   ipBind: "0.0.0.0"
   replicas: 1
   loggingOverrideLevel: null

@@ -308,13 +289,6 @@ volume:
   # minimum free disk space(in percents). If free disk space lower this value - all volumes marks as ReadOnly
   minFreeSpacePercent: 7
-
-  # Custom command line arguments to add to the volume command
-  # Example to fix IPv6 metrics connectivity issues:
-  # extraArgs: ["-metricsIp", "0.0.0.0"]
-  # Example with multiple args:
-  # extraArgs: ["-customFlag", "value", "-anotherFlag"]
-  extraArgs: []
 
   # For each data disk you may use ANY storage-class, example with local-path-provisioner
   # Annotations are optional.
   # dataDirs:

@@ -546,8 +520,6 @@ filer:
   port: 8888
   grpcPort: 18888
   metricsPort: 9327
-  metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
-  ipBind: "0.0.0.0" # IP address to bind to. Set to 0.0.0.0 to allow external traffic
   loggingOverrideLevel: null
   filerGroup: ""
   # prefer to read and write to volumes in this data center (not set by default)

@@ -575,13 +547,6 @@ filer:
   # Disable http request, only gRpc operations are allowed
   disableHttp: false
-
-  # Custom command line arguments to add to the filer command
-  # Example to fix IPv6 metrics connectivity issues:
-  # extraArgs: ["-metricsIp", "0.0.0.0"]
-  # Example with multiple args:
-  # extraArgs: ["-customFlag", "value", "-anotherFlag"]
-  extraArgs: []
 
   # Add a custom notification.toml to configure filer notifications
   # Example:
   # notificationConfig: |-
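For context on the extraArgs lists removed above: each list item was emitted onto its own continuation line of the container command by the {{- range ... }} blocks deleted from the three StatefulSet templates, which is why a flag and its value appear as separate items in the commented examples. A hypothetical illustration:

```yaml
# values.yaml (master section), removed in 3.90:
extraArgs: ["-metricsIp", "0.0.0.0"]
# rendered into the master command by the deleted range block roughly as:
#   -metricsIp \
#   0.0.0.0 \
```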
Deleted file (271 lines): SeaweedFS telemetry server deployment guide

@@ -1,271 +0,0 @@ (entire file removed; its former contents follow)

# SeaweedFS Telemetry Server Deployment

This document describes how to deploy the SeaweedFS telemetry server to a remote server using GitHub Actions.

## Prerequisites

1. A remote Linux server with:
   - SSH access
   - systemd (for service management)
   - Optional: Prometheus and Grafana (for monitoring)

2. GitHub repository secrets configured (see [Setup GitHub Secrets](#setup-github-secrets) below):
   - `TELEMETRY_SSH_PRIVATE_KEY`: SSH private key for accessing the remote server
   - `TELEMETRY_HOST`: Remote server hostname or IP address
   - `TELEMETRY_USER`: Username for SSH access

## Setup GitHub Secrets

Before using the deployment workflow, you need to configure the required secrets in your GitHub repository.

### Step 1: Generate SSH Key Pair

On your local machine, generate a new SSH key pair specifically for deployment:

```bash
# Generate a new SSH key pair
ssh-keygen -t ed25519 -C "seaweedfs-telemetry-deploy" -f ~/.ssh/seaweedfs_telemetry_deploy

# This creates two files:
# ~/.ssh/seaweedfs_telemetry_deploy     (private key)
# ~/.ssh/seaweedfs_telemetry_deploy.pub (public key)
```

### Step 2: Configure Remote Server

Copy the public key to your remote server:

```bash
# Copy public key to remote server
ssh-copy-id -i ~/.ssh/seaweedfs_telemetry_deploy.pub user@your-server.com

# Or manually append to authorized_keys
cat ~/.ssh/seaweedfs_telemetry_deploy.pub | ssh user@your-server.com "mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys"
```

Test the SSH connection:

```bash
# Test SSH connection with the new key
ssh -i ~/.ssh/seaweedfs_telemetry_deploy user@your-server.com "echo 'SSH connection successful'"
```

### Step 3: Add Secrets to GitHub Repository

1. Go to your GitHub repository
2. Click on the **Settings** tab
3. In the sidebar, click **Secrets and variables** → **Actions**
4. Click **New repository secret** for each of the following:

#### TELEMETRY_SSH_PRIVATE_KEY

```bash
# Display the private key content
cat ~/.ssh/seaweedfs_telemetry_deploy
```

- **Name**: `TELEMETRY_SSH_PRIVATE_KEY`
- **Value**: Copy the entire private key content, including the `-----BEGIN OPENSSH PRIVATE KEY-----` and `-----END OPENSSH PRIVATE KEY-----` lines

#### TELEMETRY_HOST

- **Name**: `TELEMETRY_HOST`
- **Value**: Your server's hostname or IP address (e.g., `telemetry.example.com` or `192.168.1.100`)

#### TELEMETRY_USER

- **Name**: `TELEMETRY_USER`
- **Value**: The username on the remote server (e.g., `ubuntu`, `deploy`, or your username)

### Step 4: Verify Configuration

Create a simple test workflow or manually trigger the deployment to verify that the secrets are working correctly.
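Such a test workflow might look like the following sketch (a hypothetical one-off file; the secret names match those defined above):

```yaml
# .github/workflows/test_telemetry_secrets.yml (hypothetical)
name: Test Telemetry Secrets
on: workflow_dispatch

jobs:
  check-ssh:
    runs-on: ubuntu-latest
    steps:
      - name: Connect with the deploy key
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          # A successful echo proves key, host, and user are all configured
          ssh -i ~/.ssh/deploy_key -o StrictHostKeyChecking=no \
            "${{ secrets.TELEMETRY_USER }}@${{ secrets.TELEMETRY_HOST }}" \
            "echo 'secrets OK'"
          rm -f ~/.ssh/deploy_key
```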
### Security Best Practices

1. **Dedicated SSH Key**: Use a separate SSH key only for deployment
2. **Limited Permissions**: Create a dedicated user on the remote server with minimal required permissions
3. **Key Rotation**: Regularly rotate SSH keys
4. **Server Access**: Restrict SSH access to specific IP ranges if possible

### Example Server Setup

If you're setting up a new server, here's a basic configuration:

```bash
# On the remote server, create a dedicated user for deployment
sudo useradd -m -s /bin/bash seaweedfs-deploy
sudo usermod -aG sudo seaweedfs-deploy  # Only if sudo access is needed

# Switch to the deployment user
sudo su - seaweedfs-deploy

# Create SSH directory
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Add your public key (paste the content of seaweedfs_telemetry_deploy.pub)
nano ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
```

### Troubleshooting

#### SSH Connection Issues

```bash
# Test SSH connection manually
ssh -i ~/.ssh/seaweedfs_telemetry_deploy -v user@your-server.com

# Check SSH key permissions
ls -la ~/.ssh/seaweedfs_telemetry_deploy*
# Should show: -rw------- for the private key, -rw-r--r-- for the public key
```

#### GitHub Actions Failures

1. **Check secrets**: Ensure all three secrets are properly set in GitHub
2. **Verify SSH key**: Make sure the entire private key (including headers/footers) is copied
3. **Test connectivity**: Manually SSH to the server from your local machine
4. **Check user permissions**: Ensure the remote user has the necessary permissions

## GitHub Actions Workflow

The deployment workflow (`.github/workflows/deploy_telemetry.yml`) provides two main operations:

### 1. First-time Setup

Run this once to set up the remote server:

1. Go to GitHub Actions in your repository
2. Select the "Deploy Telemetry Server" workflow
3. Click "Run workflow"
4. Check "Run first-time server setup"
5. Click "Run workflow"

This will:

- Create the necessary directories on the remote server
- Set up the systemd service configuration
- Configure log rotation
- Upload the Grafana dashboard and Prometheus configuration
- Enable the telemetry service (but not start it yet)

**Note**: The setup only prepares the infrastructure. You need to run a deployment afterward to install and start the telemetry server.

### 2. Deploy Updates

To deploy updates, manually trigger a deployment (or use the CLI equivalent shown after this list):

1. Go to GitHub Actions in your repository
2. Select the "Deploy Telemetry Server" workflow
3. Click "Run workflow"
4. Check "Deploy telemetry server to remote server"
5. Click "Run workflow"
## Server Directory Structure

After setup, the remote server will have:

```
~/seaweedfs-telemetry/
├── bin/
│   └── telemetry-server        # Binary executable
├── logs/
│   ├── telemetry.log           # Application logs
│   └── telemetry.error.log     # Error logs
├── data/                       # Data directory (if needed)
├── grafana-dashboard.json      # Grafana dashboard configuration
└── prometheus.yml              # Prometheus configuration
```

## Service Management

The telemetry server runs as a systemd service:

```bash
# Check service status
sudo systemctl status telemetry.service

# View logs
sudo journalctl -u telemetry.service -f

# Restart service
sudo systemctl restart telemetry.service

# Stop/start service
sudo systemctl stop telemetry.service
sudo systemctl start telemetry.service
```

## Accessing the Service

After deployment, the telemetry server will be available at:

- **Dashboard**: `http://your-server:8353`
- **API**: `http://your-server:8353/api/*`
- **Metrics**: `http://your-server:8353/metrics`
- **Health Check**: `http://your-server:8353/health`

## Optional: Prometheus and Grafana Integration

### Prometheus Setup

1. Install Prometheus on your server
2. Update `/etc/prometheus/prometheus.yml` to include:

```yaml
scrape_configs:
  - job_name: 'seaweedfs-telemetry'
    static_configs:
      - targets: ['localhost:8353']
    metrics_path: '/metrics'
```

### Grafana Setup

1. Install Grafana on your server
2. Import the dashboard from `~/seaweedfs-telemetry/grafana-dashboard.json`
3. Configure Prometheus as a data source pointing to your Prometheus instance

## Troubleshooting

### Deployment Fails

1. Check the GitHub Actions logs for detailed error messages
2. Verify SSH connectivity: `ssh user@host`
3. Ensure all required secrets are configured in GitHub

### Service Won't Start

1. Check the service logs: `sudo journalctl -u telemetry.service`
2. Verify binary permissions: `ls -la ~/seaweedfs-telemetry/bin/`
3. Test the binary manually: `~/seaweedfs-telemetry/bin/telemetry-server -help`

### Port Conflicts

If port 8353 is already in use:

1. Edit the systemd service: `sudo systemctl edit telemetry.service`
2. Add an override configuration:

```ini
[Service]
ExecStart=
ExecStart=/home/user/seaweedfs-telemetry/bin/telemetry-server -port=8354
```

3. Reload and restart: `sudo systemctl daemon-reload && sudo systemctl restart telemetry.service`

## Security Considerations

1. **Firewall**: Consider restricting access to telemetry ports
2. **SSH Keys**: Use dedicated SSH keys with minimal permissions
3. **User Permissions**: Run the service as a non-privileged user
4. **Network**: Consider running on internal networks only

## Monitoring

Monitor the deployment and service health:

- **GitHub Actions**: Check workflow runs for deployment status
- **System Logs**: `sudo journalctl -u telemetry.service`
- **Application Logs**: `tail -f ~/seaweedfs-telemetry/logs/telemetry.log`
- **Health Endpoint**: `curl http://localhost:8353/health`
- **Metrics**: `curl http://localhost:8353/metrics`
@ -1,353 +0,0 @@
# SeaweedFS Telemetry System

A privacy-respecting telemetry system for SeaweedFS that collects cluster-level usage statistics and provides visualization through Prometheus and Grafana.

## Features

- **Privacy-First Design**: Uses in-memory cluster IDs (regenerated on restart), no personal data collection
- **Prometheus Integration**: Native Prometheus metrics for monitoring and alerting
- **Grafana Dashboards**: Pre-built dashboards for data visualization
- **Protocol Buffers**: Efficient binary data transmission for optimal performance
- **Opt-in Only**: Disabled by default, requires explicit configuration
- **Docker Compose**: Complete monitoring stack deployment
- **Automatic Cleanup**: Configurable data retention policies

## Architecture

```
SeaweedFS Cluster → Telemetry Client → Telemetry Server → Prometheus → Grafana
                      (protobuf)          (metrics)        (queries)
```

## Data Transmission

The telemetry system uses **Protocol Buffers exclusively** for efficient binary data transmission:

- **Compact Format**: 30-50% smaller than JSON
- **Fast Serialization**: Better performance than text-based formats
- **Type Safety**: Strong typing with generated Go structs
- **Schema Evolution**: Built-in versioning support

### Protobuf Schema

```protobuf
message TelemetryData {
  string cluster_id = 1;          // In-memory generated UUID
  string version = 2;             // SeaweedFS version
  string os = 3;                  // Operating system
  // Field 4 reserved (was features)
  // Field 5 reserved (was deployment)
  int32 volume_server_count = 6;  // Number of volume servers
  uint64 total_disk_bytes = 7;    // Total disk usage
  int32 total_volume_count = 8;   // Total volume count
  int32 filer_count = 9;          // Number of filer servers
  int32 broker_count = 10;        // Number of broker servers
  int64 timestamp = 11;           // Collection timestamp
}
```
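For illustration, here is a minimal Go client sketch for this format. It assumes the generated package `github.com/seaweedfs/seaweedfs/telemetry/proto` is on the module path and a local server is listening on port 8080; the sample values mirror the Collected Data table below, and the cluster ID shown is hypothetical:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"github.com/seaweedfs/seaweedfs/telemetry/proto"
	protobuf "google.golang.org/protobuf/proto"
)

func main() {
	// Wrap the cluster snapshot in a TelemetryRequest, as the server expects.
	req := &proto.TelemetryRequest{
		Data: &proto.TelemetryData{
			ClusterId:         "a1b2c3d4-0000-4000-8000-000000000000", // hypothetical in-memory UUID
			Version:           "3.45",
			Os:                "linux/amd64",
			VolumeServerCount: 5,
			TotalDiskBytes:    1073741824,
			TotalVolumeCount:  120,
			FilerCount:        2,
			BrokerCount:       1,
			Timestamp:         time.Now().Unix(),
		},
	}

	// Protobuf binary encoding: typically 30-50% smaller than the JSON equivalent.
	body, err := protobuf.Marshal(req)
	if err != nil {
		panic(err)
	}

	// The server only accepts application/x-protobuf (or application/protobuf).
	resp, err := http.Post("http://localhost:8080/api/collect",
		"application/x-protobuf", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```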

## Privacy Approach

- **No Personal Data**: No hostnames, IP addresses, or user information
- **In-Memory IDs**: Cluster IDs are generated in-memory and change on restart
- **Aggregated Data**: Only cluster-level statistics, no individual file/user data
- **Opt-in Only**: Telemetry is disabled by default
- **Transparent**: Open source implementation, clear data collection policy

## Collected Data

| Field | Description | Example |
|-------|-------------|---------|
| `cluster_id` | In-memory UUID (changes on restart) | `a1b2c3d4-...` |
| `version` | SeaweedFS version | `3.45` |
| `os` | Operating system and architecture | `linux/amd64` |
| `volume_server_count` | Number of volume servers | `5` |
| `total_disk_bytes` | Total disk usage across cluster | `1073741824` |
| `total_volume_count` | Total number of volumes | `120` |
| `filer_count` | Number of filer servers | `2` |
| `broker_count` | Number of broker servers | `1` |
| `timestamp` | When data was collected | `1640995200` |

## Quick Start

### 1. Deploy Telemetry Server

```bash
# Clone and start the complete monitoring stack
git clone https://github.com/seaweedfs/seaweedfs.git
cd seaweedfs/telemetry
docker-compose up -d

# Or run the server directly
cd server
go run . -port=8080 -dashboard=true
```

### 2. Configure SeaweedFS

```bash
# Enable telemetry in SeaweedFS master (uses default telemetry.seaweedfs.com)
weed master -telemetry=true

# Or in server mode
weed server -telemetry=true

# Or specify a custom telemetry server
weed master -telemetry=true -telemetry.url=http://localhost:8080/api/collect
```

### 3. Access Dashboards

- **Telemetry Server**: http://localhost:8080
- **Prometheus**: http://localhost:9090
- **Grafana**: http://localhost:3000 (admin/admin)

## Configuration

### SeaweedFS Master/Server

```bash
# Enable telemetry
-telemetry=true

# Set a custom telemetry server URL (optional, defaults to telemetry.seaweedfs.com)
-telemetry.url=http://your-telemetry-server:8080/api/collect
```

### Telemetry Server

```bash
# Server configuration
-port=8080        # Server port
-dashboard=true   # Enable built-in dashboard
-cleanup=24h      # Cleanup interval
-max-age=720h     # Maximum data retention (30 days)

# Example
./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h
```

## Prometheus Metrics

The telemetry server exposes these Prometheus metrics:

### Cluster Metrics
- `seaweedfs_telemetry_total_clusters`: Total unique clusters (30 days)
- `seaweedfs_telemetry_active_clusters`: Active clusters (7 days)

### Per-Cluster Metrics
- `seaweedfs_telemetry_volume_servers{cluster_id, version, os}`: Volume servers per cluster
- `seaweedfs_telemetry_disk_bytes{cluster_id, version, os}`: Disk usage per cluster
- `seaweedfs_telemetry_volume_count{cluster_id, version, os}`: Volume count per cluster
- `seaweedfs_telemetry_filer_count{cluster_id, version, os}`: Filer servers per cluster
- `seaweedfs_telemetry_broker_count{cluster_id, version, os}`: Broker servers per cluster
- `seaweedfs_telemetry_cluster_info{cluster_id, version, os}`: Cluster metadata

### Server Metrics
- `seaweedfs_telemetry_reports_received_total`: Total telemetry reports received

## API Endpoints

### Data Collection
```bash
# Submit telemetry data (protobuf only)
POST /api/collect
Content-Type: application/x-protobuf

[TelemetryRequest protobuf data]
```

### Statistics (JSON for dashboard/debugging)
```bash
# Get aggregated statistics
GET /api/stats

# Get recent cluster instances
GET /api/instances?limit=100

# Get metrics over time
GET /api/metrics?days=30
```
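These JSON endpoints can also be consumed programmatically. Below is a small sketch of reading `/api/stats` from Go; the field names (`total_instances`, `active_instances`, `versions`, `os_distribution`) follow what the built-in dashboard reads, while the integer value types are an assumption:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Stats mirrors the /api/stats response as consumed by the dashboard;
// the exact value types are assumed here.
type Stats struct {
	TotalInstances  int            `json:"total_instances"`
	ActiveInstances int            `json:"active_instances"`
	Versions        map[string]int `json:"versions"`
	OSDistribution  map[string]int `json:"os_distribution"`
}

func main() {
	resp, err := http.Get("http://localhost:8080/api/stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s Stats
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Printf("clusters: %d total, %d active, %d versions\n",
		s.TotalInstances, s.ActiveInstances, len(s.Versions))
}
```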

### Monitoring
```bash
# Prometheus metrics
GET /metrics
```

## Docker Deployment

### Complete Stack (Recommended)

```yaml
# docker-compose.yml
version: '3.8'
services:
  telemetry-server:
    build: ./server
    ports:
      - "8080:8080"
    command: ["-port=8080", "-dashboard=true", "-cleanup=24h"]

  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - ./grafana-provisioning:/etc/grafana/provisioning
      - ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs.json
```

```bash
# Deploy the stack
docker-compose up -d

# Scale the telemetry server if needed
docker-compose up -d --scale telemetry-server=3
```

### Server Only

```bash
# Build and run the telemetry server
cd server
docker build -t seaweedfs-telemetry .
docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true
```

## Development

### Protocol Buffer Development

```bash
# Generate protobuf code
cd telemetry
protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto

# The generated code is already included in the repository
```

### Build from Source

```bash
# Build the telemetry server
cd telemetry/server
go build -o telemetry-server .

# Build SeaweedFS with telemetry support
cd ../..
go build -o weed ./weed
```

### Testing

```bash
# Test the telemetry server
cd telemetry/server
go test ./...

# Test protobuf communication (requires protobuf tools)
# See the telemetry client code for examples
```
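As a starting point for such tests, the collect handler's protobuf-only contract can be checked with `httptest`. A minimal sketch, placed alongside the `api` package: passing a nil storage backend is enough here, because requests with the wrong content type are rejected before storage is touched:

```go
package api

import (
	"net/http"
	"net/http/httptest"
	"strings"
	"testing"
)

// TestCollectRejectsNonProtobuf verifies that JSON submissions are refused
// with 415 Unsupported Media Type, per the protobuf-only policy.
func TestCollectRejectsNonProtobuf(t *testing.T) {
	h := NewHandler(nil) // storage is never reached on this code path

	req := httptest.NewRequest(http.MethodPost, "/api/collect", strings.NewReader("{}"))
	req.Header.Set("Content-Type", "application/json")
	rec := httptest.NewRecorder()

	h.CollectTelemetry(rec, req)

	if rec.Code != http.StatusUnsupportedMediaType {
		t.Fatalf("expected 415, got %d", rec.Code)
	}
}
```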

## Grafana Dashboard

The included Grafana dashboard provides:

- **Overview**: Total and active clusters, version distribution
- **Resource Usage**: Volume servers and disk usage over time
- **Infrastructure**: Operating system distribution and server counts
- **Growth Trends**: Historical growth patterns

### Custom Queries

```promql
# Total active clusters
seaweedfs_telemetry_active_clusters

# Disk usage by version
sum by (version) (seaweedfs_telemetry_disk_bytes)

# Volume servers by operating system
sum by (os) (seaweedfs_telemetry_volume_servers)

# Filer servers by version
sum by (version) (seaweedfs_telemetry_filer_count)

# Broker servers across all clusters
sum(seaweedfs_telemetry_broker_count)

# Growth rate (weekly)
increase(seaweedfs_telemetry_total_clusters[7d])
```

## Security Considerations

- **Network Security**: Use HTTPS in production environments
- **Access Control**: Implement authentication for Grafana and Prometheus
- **Data Retention**: Configure appropriate retention policies
- **Monitoring**: Monitor the telemetry infrastructure itself

## Troubleshooting

### Common Issues

**SeaweedFS not sending data:**
```bash
# Check the telemetry configuration
weed master -h | grep telemetry

# Verify connectivity
curl -v http://your-telemetry-server:8080/api/collect
```

**Server not receiving data:**
```bash
# Check the server logs
docker-compose logs telemetry-server

# Verify the metrics endpoint
curl http://localhost:8080/metrics
```

**Prometheus not scraping:**
```bash
# Check Prometheus targets
curl http://localhost:9090/api/v1/targets

# Verify the configuration
docker-compose logs prometheus
```

### Debugging

```bash
# Enable verbose logging in SeaweedFS
weed master -v=2 -telemetry=true

# Check telemetry server metrics
curl http://localhost:8080/metrics | grep seaweedfs_telemetry

# Test the data flow
curl http://localhost:8080/api/stats
```

## Contributing

1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests if applicable
5. Submit a pull request

## License

This telemetry system is part of SeaweedFS and follows the same Apache 2.0 license.
@ -1,55 +0,0 @@
version: '3.8'

services:
  telemetry-server:
    build: ./server
    ports:
      - "8080:8080"
    command: [
      "./telemetry-server",
      "-port=8080",
      "-dashboard=false",  # Disable built-in dashboard, use Grafana
      "-log=true",
      "-cors=true"
    ]
    networks:
      - telemetry

  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    networks:
      - telemetry

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs-telemetry.json
      - ./grafana-provisioning:/etc/grafana/provisioning
    networks:
      - telemetry

volumes:
  prometheus_data:
  grafana_data:

networks:
  telemetry:
    driver: bridge
@ -1,734 +0,0 @@
{
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": {"type": "grafana", "uid": "-- Grafana --"},
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "editable": true,
  "fiscalYearStartMonth": 0,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "thresholds"},
          "custom": {"align": "auto", "cellOptions": {"type": "auto"}, "inspect": false},
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]}
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 0, "y": 0},
      "id": 1,
      "options": {"showHeader": true},
      "pluginVersion": "10.0.0",
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "seaweedfs_telemetry_total_clusters", "format": "time_series", "refId": "A"}
      ],
      "title": "Total SeaweedFS Clusters",
      "type": "stat"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "thresholds"},
          "custom": {"align": "auto", "cellOptions": {"type": "auto"}, "inspect": false},
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]}
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 12, "y": 0},
      "id": 2,
      "options": {"showHeader": true},
      "pluginVersion": "10.0.0",
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "seaweedfs_telemetry_active_clusters", "format": "time_series", "refId": "A"}
      ],
      "title": "Active Clusters (7 days)",
      "type": "stat"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {"hideFrom": {"legend": false, "tooltip": false, "vis": false}},
          "mappings": []
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 0, "y": 8},
      "id": 3,
      "options": {
        "legend": {"displayMode": "visible", "placement": "bottom", "showLegend": true},
        "pieType": "pie",
        "reduceOptions": {"values": false, "calcs": ["lastNotNull"], "fields": ""},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "count by (version) (seaweedfs_telemetry_cluster_info)", "format": "time_series", "legendFormat": "{{version}}", "refId": "A"}
      ],
      "title": "SeaweedFS Version Distribution",
      "type": "piechart"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {"hideFrom": {"legend": false, "tooltip": false, "vis": false}},
          "mappings": []
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 12, "y": 8},
      "id": 4,
      "options": {
        "legend": {"displayMode": "visible", "placement": "bottom", "showLegend": true},
        "pieType": "pie",
        "reduceOptions": {"values": false, "calcs": ["lastNotNull"], "fields": ""},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "count by (os) (seaweedfs_telemetry_cluster_info)", "format": "time_series", "legendFormat": "{{os}}", "refId": "A"}
      ],
      "title": "Operating System Distribution",
      "type": "piechart"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {
            "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line",
            "fillOpacity": 0, "gradientMode": "none",
            "hideFrom": {"legend": false, "tooltip": false, "vis": false},
            "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5,
            "scaleDistribution": {"type": "linear"}, "showPoints": "auto", "spanNulls": false,
            "stacking": {"group": "A", "mode": "none"}, "thresholdsStyle": {"mode": "off"}
          },
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]}
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 24, "x": 0, "y": 16},
      "id": 5,
      "options": {
        "legend": {"calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "sum(seaweedfs_telemetry_volume_servers)", "format": "time_series", "legendFormat": "Total Volume Servers", "refId": "A"}
      ],
      "title": "Total Volume Servers Over Time",
      "type": "timeseries"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {
            "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line",
            "fillOpacity": 0, "gradientMode": "none",
            "hideFrom": {"legend": false, "tooltip": false, "vis": false},
            "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5,
            "scaleDistribution": {"type": "linear"}, "showPoints": "auto", "spanNulls": false,
            "stacking": {"group": "A", "mode": "none"}, "thresholdsStyle": {"mode": "off"}
          },
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]},
          "unit": "bytes"
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 0, "y": 24},
      "id": 6,
      "options": {
        "legend": {"calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "sum(seaweedfs_telemetry_disk_bytes)", "format": "time_series", "legendFormat": "Total Disk Usage", "refId": "A"}
      ],
      "title": "Total Disk Usage Over Time",
      "type": "timeseries"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {
            "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line",
            "fillOpacity": 0, "gradientMode": "none",
            "hideFrom": {"legend": false, "tooltip": false, "vis": false},
            "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5,
            "scaleDistribution": {"type": "linear"}, "showPoints": "auto", "spanNulls": false,
            "stacking": {"group": "A", "mode": "none"}, "thresholdsStyle": {"mode": "off"}
          },
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]}
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 12, "y": 24},
      "id": 7,
      "options": {
        "legend": {"calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "sum(seaweedfs_telemetry_volume_count)", "format": "time_series", "legendFormat": "Total Volume Count", "refId": "A"}
      ],
      "title": "Total Volume Count Over Time",
      "type": "timeseries"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {
            "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line",
            "fillOpacity": 0, "gradientMode": "none",
            "hideFrom": {"legend": false, "tooltip": false, "vis": false},
            "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5,
            "scaleDistribution": {"type": "linear"}, "showPoints": "auto", "spanNulls": false,
            "stacking": {"group": "A", "mode": "none"}, "thresholdsStyle": {"mode": "off"}
          },
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]}
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 0, "y": 32},
      "id": 8,
      "options": {
        "legend": {"calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "sum(seaweedfs_telemetry_filer_count)", "format": "time_series", "legendFormat": "Total Filer Count", "refId": "A"}
      ],
      "title": "Total Filer Servers Over Time",
      "type": "timeseries"
    },
    {
      "datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"},
      "fieldConfig": {
        "defaults": {
          "color": {"mode": "palette-classic"},
          "custom": {
            "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line",
            "fillOpacity": 0, "gradientMode": "none",
            "hideFrom": {"legend": false, "tooltip": false, "vis": false},
            "lineInterpolation": "linear", "lineWidth": 1, "pointSize": 5,
            "scaleDistribution": {"type": "linear"}, "showPoints": "auto", "spanNulls": false,
            "stacking": {"group": "A", "mode": "none"}, "thresholdsStyle": {"mode": "off"}
          },
          "mappings": [],
          "thresholds": {"mode": "absolute", "steps": [{"color": "green", "value": null}, {"color": "red", "value": 80}]}
        },
        "overrides": []
      },
      "gridPos": {"h": 8, "w": 12, "x": 12, "y": 32},
      "id": 9,
      "options": {
        "legend": {"calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true},
        "tooltip": {"mode": "single", "sort": "none"}
      },
      "targets": [
        {"datasource": {"type": "prometheus", "uid": "${DS_PROMETHEUS}"}, "expr": "sum(seaweedfs_telemetry_broker_count)", "format": "time_series", "legendFormat": "Total Broker Count", "refId": "A"}
      ],
      "title": "Total Broker Servers Over Time",
      "type": "timeseries"
    }
  ],
  "refresh": "5m",
  "schemaVersion": 38,
  "style": "dark",
  "tags": ["seaweedfs", "telemetry"],
  "templating": {"list": []},
  "time": {"from": "now-24h", "to": "now"},
  "timepicker": {},
  "timezone": "",
  "title": "SeaweedFS Telemetry Dashboard",
  "uid": "seaweedfs-telemetry",
  "version": 1,
  "weekStart": ""
}
@ -1,12 +0,0 @@
apiVersion: 1

providers:
  - name: 'seaweedfs'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: false
    updateIntervalSeconds: 10
    allowUiUpdates: true
    options:
      path: /var/lib/grafana/dashboards
@ -1,9 +0,0 @@
apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    editable: true
@ -1,15 +0,0 @@
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

scrape_configs:
  - job_name: 'seaweedfs-telemetry'
    static_configs:
      - targets: ['telemetry-server:8080']
    scrape_interval: 30s
    metrics_path: '/metrics'
    scrape_timeout: 10s
@ -1,377 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.34.2
// 	protoc        v5.29.3
// source: telemetry.proto

package proto

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// TelemetryData represents cluster-level telemetry information
type TelemetryData struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Unique cluster identifier (generated in-memory)
	ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
	// SeaweedFS version
	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// Operating system (e.g., "linux/amd64")
	Os string `protobuf:"bytes,3,opt,name=os,proto3" json:"os,omitempty"`
	// Number of volume servers in the cluster
	VolumeServerCount int32 `protobuf:"varint,6,opt,name=volume_server_count,json=volumeServerCount,proto3" json:"volume_server_count,omitempty"`
	// Total disk usage across all volume servers (in bytes)
	TotalDiskBytes uint64 `protobuf:"varint,7,opt,name=total_disk_bytes,json=totalDiskBytes,proto3" json:"total_disk_bytes,omitempty"`
	// Total number of volumes in the cluster
	TotalVolumeCount int32 `protobuf:"varint,8,opt,name=total_volume_count,json=totalVolumeCount,proto3" json:"total_volume_count,omitempty"`
	// Number of filer servers in the cluster
	FilerCount int32 `protobuf:"varint,9,opt,name=filer_count,json=filerCount,proto3" json:"filer_count,omitempty"`
	// Number of broker servers in the cluster
	BrokerCount int32 `protobuf:"varint,10,opt,name=broker_count,json=brokerCount,proto3" json:"broker_count,omitempty"`
	// Unix timestamp when the data was collected
	Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}

func (x *TelemetryData) Reset() {
	*x = TelemetryData{}
	if protoimpl.UnsafeEnabled {
		mi := &file_telemetry_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TelemetryData) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TelemetryData) ProtoMessage() {}

func (x *TelemetryData) ProtoReflect() protoreflect.Message {
	mi := &file_telemetry_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryData.ProtoReflect.Descriptor instead.
func (*TelemetryData) Descriptor() ([]byte, []int) {
	return file_telemetry_proto_rawDescGZIP(), []int{0}
}

func (x *TelemetryData) GetClusterId() string {
	if x != nil {
		return x.ClusterId
	}
	return ""
}

func (x *TelemetryData) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

func (x *TelemetryData) GetOs() string {
	if x != nil {
		return x.Os
	}
	return ""
}

func (x *TelemetryData) GetVolumeServerCount() int32 {
	if x != nil {
		return x.VolumeServerCount
	}
	return 0
}

func (x *TelemetryData) GetTotalDiskBytes() uint64 {
	if x != nil {
		return x.TotalDiskBytes
	}
	return 0
}

func (x *TelemetryData) GetTotalVolumeCount() int32 {
	if x != nil {
		return x.TotalVolumeCount
	}
	return 0
}

func (x *TelemetryData) GetFilerCount() int32 {
	if x != nil {
		return x.FilerCount
	}
	return 0
}

func (x *TelemetryData) GetBrokerCount() int32 {
	if x != nil {
		return x.BrokerCount
	}
	return 0
}

func (x *TelemetryData) GetTimestamp() int64 {
	if x != nil {
		return x.Timestamp
	}
	return 0
}

// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server
type TelemetryRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Data *TelemetryData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}

func (x *TelemetryRequest) Reset() {
	*x = TelemetryRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_telemetry_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TelemetryRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TelemetryRequest) ProtoMessage() {}

func (x *TelemetryRequest) ProtoReflect() protoreflect.Message {
	mi := &file_telemetry_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryRequest.ProtoReflect.Descriptor instead.
func (*TelemetryRequest) Descriptor() ([]byte, []int) {
	return file_telemetry_proto_rawDescGZIP(), []int{1}
}

func (x *TelemetryRequest) GetData() *TelemetryData {
	if x != nil {
		return x.Data
	}
	return nil
}

// TelemetryResponse is returned by the telemetry server
type TelemetryResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Success bool   `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
}

func (x *TelemetryResponse) Reset() {
	*x = TelemetryResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_telemetry_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TelemetryResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TelemetryResponse) ProtoMessage() {}

func (x *TelemetryResponse) ProtoReflect() protoreflect.Message {
	mi := &file_telemetry_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryResponse.ProtoReflect.Descriptor instead.
func (*TelemetryResponse) Descriptor() ([]byte, []int) {
	return file_telemetry_proto_rawDescGZIP(), []int{2}
}

func (x *TelemetryResponse) GetSuccess() bool {
	if x != nil {
		return x.Success
	}
	return false
}

func (x *TelemetryResponse) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

var File_telemetry_proto protoreflect.FileDescriptor

var file_telemetry_proto_rawDesc = []byte{
	0x0a, 0x0f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x12, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xce, 0x02, 0x0a,
	0x0d, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d,
	0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a,
	0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
	0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, 0x03, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
	0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06,
	0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76,
	0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c,
	0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
	0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x42, 0x79, 0x74, 0x65,
	0x73, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
	0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x74,
	0x6f, 0x74, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
	0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09,
	0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74,
	0x12, 0x21, 0x0a, 0x0c, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
	0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x43, 0x6f,
	0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
	0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
	0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x40, 0x0a,
	0x10, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
	0x74, 0x12, 0x2c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
	0x18, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x54, 0x65, 0x6c, 0x65,
	0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22,
	0x47, 0x0a, 0x11, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70,
	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
	0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18,
	0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68,
	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
	0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d,
	0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x33,
}

var (
	file_telemetry_proto_rawDescOnce sync.Once
	file_telemetry_proto_rawDescData = file_telemetry_proto_rawDesc
)

func file_telemetry_proto_rawDescGZIP() []byte {
	file_telemetry_proto_rawDescOnce.Do(func() {
		file_telemetry_proto_rawDescData = protoimpl.X.CompressGZIP(file_telemetry_proto_rawDescData)
	})
	return file_telemetry_proto_rawDescData
}

var file_telemetry_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
var file_telemetry_proto_goTypes = []any{
	(*TelemetryData)(nil),     // 0: telemetry.TelemetryData
	(*TelemetryRequest)(nil),  // 1: telemetry.TelemetryRequest
	(*TelemetryResponse)(nil), // 2: telemetry.TelemetryResponse
}
var file_telemetry_proto_depIdxs = []int32{
	0, // 0: telemetry.TelemetryRequest.data:type_name -> telemetry.TelemetryData
	1, // [1:1] is the sub-list for method output_type
	1, // [1:1] is the sub-list for method input_type
	1, // [1:1] is the sub-list for extension type_name
	1, // [1:1] is the sub-list for extension extendee
	0, // [0:1] is the sub-list for field type_name
}

func init() { file_telemetry_proto_init() }
func file_telemetry_proto_init() {
	if File_telemetry_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_telemetry_proto_msgTypes[0].Exporter = func(v any, i int) any {
			switch v := v.(*TelemetryData); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_telemetry_proto_msgTypes[1].Exporter = func(v any, i int) any {
			switch v := v.(*TelemetryRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_telemetry_proto_msgTypes[2].Exporter = func(v any, i int) any {
			switch v := v.(*TelemetryResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_telemetry_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   3,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_telemetry_proto_goTypes,
		DependencyIndexes: file_telemetry_proto_depIdxs,
		MessageInfos:      file_telemetry_proto_msgTypes,
	}.Build()
	File_telemetry_proto = out.File
	file_telemetry_proto_rawDesc = nil
	file_telemetry_proto_goTypes = nil
	file_telemetry_proto_depIdxs = nil
}
@ -1,52 +0,0 @@
syntax = "proto3";

package telemetry;

option go_package = "github.com/seaweedfs/seaweedfs/telemetry/proto";

// TelemetryData represents cluster-level telemetry information
message TelemetryData {
  // Unique cluster identifier (generated in-memory)
  string cluster_id = 1;

  // SeaweedFS version
  string version = 2;

  // Operating system (e.g., "linux/amd64")
  string os = 3;

  // Field 4 reserved (was features)
  reserved 4;

  // Field 5 reserved (was deployment)
  reserved 5;

  // Number of volume servers in the cluster
  int32 volume_server_count = 6;

  // Total disk usage across all volume servers (in bytes)
  uint64 total_disk_bytes = 7;

  // Total number of volumes in the cluster
  int32 total_volume_count = 8;

  // Number of filer servers in the cluster
  int32 filer_count = 9;

  // Number of broker servers in the cluster
  int32 broker_count = 10;

  // Unix timestamp when the data was collected
  int64 timestamp = 11;
}

// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server
message TelemetryRequest {
  TelemetryData data = 1;
}

// TelemetryResponse is returned by the telemetry server
message TelemetryResponse {
  bool success = 1;
  string message = 2;
}
@ -1,18 +0,0 @@
FROM golang:1.21-alpine AS builder

WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download

COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o telemetry-server .

FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/

COPY --from=builder /app/telemetry-server .

EXPOSE 8080

CMD ["./telemetry-server"]
@ -1,97 +0,0 @@
.PHONY: build run clean test deps proto integration-test test-all

# Build the telemetry server
build:
	go build -o telemetry-server .

# Run the server in development mode
run:
	go run . -port=8080 -dashboard=true -cleanup=1h -max-age=24h

# Run the server in production mode
run-prod:
	./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h

# Clean build artifacts
clean:
	rm -f telemetry-server
	rm -f ../test/telemetry-server-test.log
	go clean

# Run unit tests
test:
	go test ./...

# Run integration tests
integration-test:
	@echo "🧪 Running telemetry integration tests..."
	cd ../../ && go run telemetry/test/integration.go

# Run all tests (unit + integration)
test-all: test integration-test

# Install dependencies
deps:
	go mod download
	go mod tidy

# Generate protobuf code (requires protoc)
proto:
	cd .. && protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto

# Build Docker image
docker-build:
	docker build -t seaweedfs-telemetry .

# Run with Docker
docker-run:
	docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true

# Development with auto-reload (requires air: go install github.com/cosmtrek/air@latest)
dev:
	air

# Check if protoc is available
check-protoc:
	@which protoc > /dev/null || (echo "protoc is required for proto generation. Install from https://grpc.io/docs/protoc-installation/" && exit 1)

# Full development setup
setup: check-protoc deps proto build

# Run a quick smoke test
smoke-test: build
	@echo "🔥 Running smoke test..."
	@timeout 10s ./telemetry-server -port=18081 > /dev/null 2>&1 & \
	SERVER_PID=$$!; \
	sleep 2; \
	if curl -s http://localhost:18081/health > /dev/null; then \
		echo "✅ Smoke test passed - server responds to health check"; \
	else \
		echo "❌ Smoke test failed - server not responding"; \
		exit 1; \
	fi; \
	kill $$SERVER_PID 2>/dev/null || true

# Continuous integration target
ci: deps proto build test integration-test
	@echo "🎉 All CI tests passed!"

# Help
help:
	@echo "Available targets:"
	@echo "  build            - Build the telemetry server binary"
	@echo "  run              - Run server in development mode"
	@echo "  run-prod         - Run server in production mode"
	@echo "  clean            - Clean build artifacts"
	@echo "  test             - Run unit tests"
	@echo "  integration-test - Run integration tests"
	@echo "  test-all         - Run all tests (unit + integration)"
	@echo "  deps             - Install Go dependencies"
	@echo "  proto            - Generate protobuf code"
	@echo "  docker-build     - Build Docker image"
	@echo "  docker-run       - Run with Docker"
	@echo "  dev              - Run with auto-reload (requires air)"
	@echo "  smoke-test       - Quick server health check"
	@echo "  setup            - Full development setup"
	@echo "  ci               - Continuous integration (all tests)"
	@echo "  help             - Show this help"
@ -1,152 +0,0 @@
package api

import (
	"encoding/json"
	"io"
	"net/http"
	"strconv"
	"time"

	"github.com/seaweedfs/seaweedfs/telemetry/proto"
	"github.com/seaweedfs/seaweedfs/telemetry/server/storage"
	protobuf "google.golang.org/protobuf/proto"
)

type Handler struct {
	storage *storage.PrometheusStorage
}

func NewHandler(storage *storage.PrometheusStorage) *Handler {
	return &Handler{storage: storage}
}

func (h *Handler) CollectTelemetry(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	contentType := r.Header.Get("Content-Type")

	// Only accept protobuf content type
	if contentType != "application/x-protobuf" && contentType != "application/protobuf" {
		http.Error(w, "Content-Type must be application/x-protobuf", http.StatusUnsupportedMediaType)
		return
	}

	// Read protobuf request
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "Failed to read request body", http.StatusBadRequest)
		return
	}

	req := &proto.TelemetryRequest{}
	if err := protobuf.Unmarshal(body, req); err != nil {
		http.Error(w, "Invalid protobuf data", http.StatusBadRequest)
		return
	}

	data := req.Data
	if data == nil {
		http.Error(w, "Missing telemetry data", http.StatusBadRequest)
		return
	}

	// Validate required fields
	if data.ClusterId == "" || data.Version == "" || data.Os == "" {
		http.Error(w, "Missing required fields", http.StatusBadRequest)
		return
	}

	// Set timestamp if not provided
	if data.Timestamp == 0 {
		data.Timestamp = time.Now().Unix()
	}

	// Store the telemetry data
	if err := h.storage.StoreTelemetry(data); err != nil {
		http.Error(w, "Failed to store data", http.StatusInternalServerError)
		return
	}

	// Return protobuf response
	resp := &proto.TelemetryResponse{
		Success: true,
		Message: "Telemetry data received",
	}

	respData, err := protobuf.Marshal(resp)
	if err != nil {
		http.Error(w, "Failed to marshal response", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/x-protobuf")
	w.WriteHeader(http.StatusOK)
	w.Write(respData)
}

func (h *Handler) GetStats(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	stats, err := h.storage.GetStats()
	if err != nil {
		http.Error(w, "Failed to get stats", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(stats)
}

func (h *Handler) GetInstances(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	limitStr := r.URL.Query().Get("limit")
	limit := 100 // default
	if limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 {
			limit = l
		}
	}

	instances, err := h.storage.GetInstances(limit)
	if err != nil {
		http.Error(w, "Failed to get instances", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(instances)
}

func (h *Handler) GetMetrics(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	daysStr := r.URL.Query().Get("days")
	days := 30 // default
	if daysStr != "" {
		if d, err := strconv.Atoi(daysStr); err == nil && d > 0 && d <= 365 {
			days = d
		}
	}

	metrics, err := h.storage.GetMetrics(days)
	if err != nil {
		http.Error(w, "Failed to get metrics", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(metrics)
}
@ -1,274 +0,0 @@
|
|||
package dashboard
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
type Handler struct{}
|
||||
|
||||
func NewHandler() *Handler {
|
||||
return &Handler{}
|
||||
}
|
||||
|
||||
func (h *Handler) ServeIndex(w http.ResponseWriter, r *http.Request) {
|
||||
html := `<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>SeaweedFS Telemetry Dashboard</title>
|
||||
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
background-color: #f5f5f5;
|
||||
}
|
||||
.container {
|
||||
max-width: 1200px;
|
||||
margin: 0 auto;
|
||||
}
|
||||
.header {
|
||||
background: white;
|
||||
padding: 20px;
|
||||
border-radius: 8px;
|
||||
margin-bottom: 20px;
|
||||
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
||||
}
|
||||
.stats-grid {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
|
||||
gap: 20px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
.stat-card {
|
||||
background: white;
|
||||
padding: 20px;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
|
||||
}
|
||||
.stat-value {
|
||||
font-size: 2em;
|
||||
            font-weight: bold;
            color: #2196F3;
        }
        .stat-label {
            color: #666;
            margin-top: 5px;
        }
        .chart-container {
            background: white;
            padding: 20px;
            border-radius: 8px;
            margin-bottom: 20px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .chart-title {
            font-size: 1.2em;
            font-weight: bold;
            margin-bottom: 15px;
        }
        .loading {
            text-align: center;
            padding: 40px;
            color: #666;
        }
        .error {
            background: #ffebee;
            color: #c62828;
            padding: 15px;
            border-radius: 4px;
            margin: 10px 0;
        }
    </style>
</head>
<body>
    <div class="container">
        <div class="header">
            <h1>SeaweedFS Telemetry Dashboard</h1>
            <p>Privacy-respecting usage analytics for SeaweedFS</p>
        </div>

        <div id="loading" class="loading">Loading telemetry data...</div>
        <div id="error" class="error" style="display: none;"></div>

        <div id="dashboard" style="display: none;">
            <div class="stats-grid">
                <div class="stat-card">
                    <div class="stat-value" id="totalInstances">-</div>
                    <div class="stat-label">Total Instances (30 days)</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="activeInstances">-</div>
                    <div class="stat-label">Active Instances (7 days)</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="totalVersions">-</div>
                    <div class="stat-label">Different Versions</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="totalOS">-</div>
                    <div class="stat-label">Operating Systems</div>
                </div>
            </div>

            <div class="chart-container">
                <div class="chart-title">Version Distribution</div>
                <canvas id="versionChart" width="400" height="200"></canvas>
            </div>

            <div class="chart-container">
                <div class="chart-title">Operating System Distribution</div>
                <canvas id="osChart" width="400" height="200"></canvas>
            </div>

            <div class="chart-container">
                <div class="chart-title">Volume Servers Over Time</div>
                <canvas id="serverChart" width="400" height="200"></canvas>
            </div>

            <div class="chart-container">
                <div class="chart-title">Total Disk Usage Over Time</div>
                <canvas id="diskChart" width="400" height="200"></canvas>
            </div>
        </div>
    </div>

    <script>
        let charts = {};

        async function loadDashboard() {
            try {
                // Load stats
                const statsResponse = await fetch('/api/stats');
                const stats = await statsResponse.json();

                // Load metrics
                const metricsResponse = await fetch('/api/metrics?days=30');
                const metrics = await metricsResponse.json();

                updateStats(stats);
                updateCharts(stats, metrics);

                document.getElementById('loading').style.display = 'none';
                document.getElementById('dashboard').style.display = 'block';
            } catch (error) {
                console.error('Error loading dashboard:', error);
                showError('Failed to load telemetry data: ' + error.message);
            }
        }

        function updateStats(stats) {
            document.getElementById('totalInstances').textContent = stats.total_instances || 0;
            document.getElementById('activeInstances').textContent = stats.active_instances || 0;
            document.getElementById('totalVersions').textContent = Object.keys(stats.versions || {}).length;
            document.getElementById('totalOS').textContent = Object.keys(stats.os_distribution || {}).length;
        }

        function updateCharts(stats, metrics) {
            // Version chart
            createPieChart('versionChart', 'Version Distribution', stats.versions || {});

            // OS chart
            createPieChart('osChart', 'Operating System Distribution', stats.os_distribution || {});

            // Server count over time
            if (metrics.dates && metrics.server_counts) {
                createLineChart('serverChart', 'Volume Servers', metrics.dates, metrics.server_counts, '#2196F3');
            }

            // Disk usage over time
            if (metrics.dates && metrics.disk_usage) {
                const diskUsageGB = metrics.disk_usage.map(bytes => Math.round(bytes / (1024 * 1024 * 1024)));
                createLineChart('diskChart', 'Disk Usage (GB)', metrics.dates, diskUsageGB, '#4CAF50');
            }
        }

        function createPieChart(canvasId, title, data) {
            const ctx = document.getElementById(canvasId).getContext('2d');

            if (charts[canvasId]) {
                charts[canvasId].destroy();
            }

            const labels = Object.keys(data);
            const values = Object.values(data);

            charts[canvasId] = new Chart(ctx, {
                type: 'pie',
                data: {
                    labels: labels,
                    datasets: [{
                        data: values,
                        backgroundColor: [
                            '#FF6384', '#36A2EB', '#FFCE56', '#4BC0C0',
                            '#9966FF', '#FF9F40', '#FF6384', '#C9CBCF'
                        ]
                    }]
                },
                options: {
                    responsive: true,
                    plugins: {
                        legend: {
                            position: 'bottom'
                        }
                    }
                }
            });
        }

        function createLineChart(canvasId, label, labels, data, color) {
            const ctx = document.getElementById(canvasId).getContext('2d');

            if (charts[canvasId]) {
                charts[canvasId].destroy();
            }

            charts[canvasId] = new Chart(ctx, {
                type: 'line',
                data: {
                    labels: labels,
                    datasets: [{
                        label: label,
                        data: data,
                        borderColor: color,
                        backgroundColor: color + '20',
                        fill: true,
                        tension: 0.1
                    }]
                },
                options: {
                    responsive: true,
                    scales: {
                        y: {
                            beginAtZero: true
                        }
                    }
                }
            });
        }

        function showError(message) {
            document.getElementById('loading').style.display = 'none';
            document.getElementById('error').style.display = 'block';
            document.getElementById('error').textContent = message;
        }

        // Load dashboard on page load
        loadDashboard();

        // Refresh every 5 minutes
        setInterval(loadDashboard, 5 * 60 * 1000);
    </script>
</body>
</html>`

    w.Header().Set("Content-Type", "text/html")
    w.WriteHeader(http.StatusOK)
    w.Write([]byte(html))
}
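The handler above embeds the entire dashboard page in a Go string and writes it out with an explicit Content-Type. A minimal sketch of how such a handler can be exercised with net/http/httptest; the handler name ServeIndex and constructor NewHandler are taken from the server wiring later in this diff, while the test file itself is hypothetical:

package dashboard_test

import (
    "net/http"
    "net/http/httptest"
    "strings"
    "testing"

    "github.com/seaweedfs/seaweedfs/telemetry/server/dashboard"
)

// Sketch: verify the dashboard handler serves the embedded HTML page.
func TestServeIndex(t *testing.T) {
    h := dashboard.NewHandler()
    rec := httptest.NewRecorder()
    req := httptest.NewRequest(http.MethodGet, "/dashboard", nil)

    h.ServeIndex(rec, req)

    if rec.Code != http.StatusOK {
        t.Fatalf("expected 200, got %d", rec.Code)
    }
    if ct := rec.Header().Get("Content-Type"); ct != "text/html" {
        t.Errorf("expected text/html, got %q", ct)
    }
    if !strings.Contains(rec.Body.String(), "SeaweedFS Telemetry Dashboard") {
        t.Errorf("page body missing dashboard title")
    }
}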
@@ -1,31 +0,0 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
@@ -1,111 +0,0 @@
package main

import (
    "encoding/json"
    "flag"
    "fmt"
    "log"
    "net/http"
    "time"

    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/seaweedfs/seaweedfs/telemetry/server/api"
    "github.com/seaweedfs/seaweedfs/telemetry/server/dashboard"
    "github.com/seaweedfs/seaweedfs/telemetry/server/storage"
)

var (
    port            = flag.Int("port", 8080, "HTTP server port")
    enableCORS      = flag.Bool("cors", true, "Enable CORS for dashboard")
    logRequests     = flag.Bool("log", true, "Log incoming requests")
    enableDashboard = flag.Bool("dashboard", true, "Enable built-in dashboard (optional when using Grafana)")
    cleanupInterval = flag.Duration("cleanup", 24*time.Hour, "Cleanup interval for old instances")
    maxInstanceAge  = flag.Duration("max-age", 30*24*time.Hour, "Maximum age for instances before cleanup")
)

func main() {
    flag.Parse()

    // Create Prometheus storage instance
    store := storage.NewPrometheusStorage()

    // Start cleanup routine
    go func() {
        ticker := time.NewTicker(*cleanupInterval)
        defer ticker.Stop()
        for range ticker.C {
            store.CleanupOldInstances(*maxInstanceAge)
        }
    }()

    // Setup HTTP handlers
    mux := http.NewServeMux()

    // Prometheus metrics endpoint
    mux.Handle("/metrics", promhttp.Handler())

    // API endpoints
    apiHandler := api.NewHandler(store)
    mux.HandleFunc("/api/collect", corsMiddleware(logMiddleware(apiHandler.CollectTelemetry)))
    mux.HandleFunc("/api/stats", corsMiddleware(logMiddleware(apiHandler.GetStats)))
    mux.HandleFunc("/api/instances", corsMiddleware(logMiddleware(apiHandler.GetInstances)))
    mux.HandleFunc("/api/metrics", corsMiddleware(logMiddleware(apiHandler.GetMetrics)))

    // Dashboard (optional)
    if *enableDashboard {
        dashboardHandler := dashboard.NewHandler()
        mux.HandleFunc("/", corsMiddleware(dashboardHandler.ServeIndex))
        mux.HandleFunc("/dashboard", corsMiddleware(dashboardHandler.ServeIndex))
        mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
    }

    // Health check
    mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "application/json")
        json.NewEncoder(w).Encode(map[string]string{
            "status": "ok",
            "time":   time.Now().UTC().Format(time.RFC3339),
        })
    })

    addr := fmt.Sprintf(":%d", *port)
    log.Printf("Starting telemetry server on %s", addr)
    log.Printf("Prometheus metrics: http://localhost%s/metrics", addr)
    if *enableDashboard {
        log.Printf("Dashboard: http://localhost%s/dashboard", addr)
    }
    log.Printf("Cleanup interval: %v, Max instance age: %v", *cleanupInterval, *maxInstanceAge)

    if err := http.ListenAndServe(addr, mux); err != nil {
        log.Fatalf("Server failed: %v", err)
    }
}

func corsMiddleware(next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if *enableCORS {
            w.Header().Set("Access-Control-Allow-Origin", "*")
            w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
            w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
        }

        if r.Method == "OPTIONS" {
            w.WriteHeader(http.StatusOK)
            return
        }

        next(w, r)
    }
}

func logMiddleware(next http.HandlerFunc) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if *logRequests {
            start := time.Now()
            next(w, r)
            log.Printf("%s %s %s %v", r.Method, r.URL.Path, r.RemoteAddr, time.Since(start))
        } else {
            next(w, r)
        }
    }
}
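Note the middleware composition: corsMiddleware wraps logMiddleware, so CORS headers and OPTIONS preflight handling run before request timing. The /health handler above is fully specified, so a tiny client sketch follows; it assumes the server is running locally on the default -port=8080:

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "net/http"
)

// Sketch: poll the telemetry server's health endpoint shown above.
func main() {
    resp, err := http.Get("http://localhost:8080/health")
    if err != nil {
        log.Fatalf("health check failed: %v", err)
    }
    defer resp.Body.Close()

    // The handler returns {"status":"ok","time":"<RFC3339>"}.
    var body map[string]string
    if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
        log.Fatalf("decode health response: %v", err)
    }
    fmt.Printf("status=%s at %s\n", body["status"], body["time"])
}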
@@ -1,235 +0,0 @@
package storage

import (
    "sync"
    "time"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
    "github.com/seaweedfs/seaweedfs/telemetry/proto"
)

type PrometheusStorage struct {
    // Prometheus metrics
    totalClusters     prometheus.Gauge
    activeClusters    prometheus.Gauge
    volumeServerCount *prometheus.GaugeVec
    totalDiskBytes    *prometheus.GaugeVec
    totalVolumeCount  *prometheus.GaugeVec
    filerCount        *prometheus.GaugeVec
    brokerCount       *prometheus.GaugeVec
    clusterInfo       *prometheus.GaugeVec
    telemetryReceived prometheus.Counter

    // In-memory storage for API endpoints (if needed)
    mu        sync.RWMutex
    instances map[string]*telemetryData
    stats     map[string]interface{}
}

// telemetryData is an internal struct that includes the received timestamp
type telemetryData struct {
    *proto.TelemetryData
    ReceivedAt time.Time `json:"received_at"`
}

func NewPrometheusStorage() *PrometheusStorage {
    return &PrometheusStorage{
        totalClusters: promauto.NewGauge(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_total_clusters",
            Help: "Total number of unique SeaweedFS clusters (last 30 days)",
        }),
        activeClusters: promauto.NewGauge(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_active_clusters",
            Help: "Number of active SeaweedFS clusters (last 7 days)",
        }),
        volumeServerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_volume_servers",
            Help: "Number of volume servers per cluster",
        }, []string{"cluster_id", "version", "os"}),
        totalDiskBytes: promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_disk_bytes",
            Help: "Total disk usage in bytes per cluster",
        }, []string{"cluster_id", "version", "os"}),
        totalVolumeCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_volume_count",
            Help: "Total number of volumes per cluster",
        }, []string{"cluster_id", "version", "os"}),
        filerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_filer_count",
            Help: "Number of filer servers per cluster",
        }, []string{"cluster_id", "version", "os"}),
        brokerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_broker_count",
            Help: "Number of broker servers per cluster",
        }, []string{"cluster_id", "version", "os"}),
        clusterInfo: promauto.NewGaugeVec(prometheus.GaugeOpts{
            Name: "seaweedfs_telemetry_cluster_info",
            Help: "Cluster information (always 1, labels contain metadata)",
        }, []string{"cluster_id", "version", "os"}),
        telemetryReceived: promauto.NewCounter(prometheus.CounterOpts{
            Name: "seaweedfs_telemetry_reports_received_total",
            Help: "Total number of telemetry reports received",
        }),
        instances: make(map[string]*telemetryData),
        stats:     make(map[string]interface{}),
    }
}

func (s *PrometheusStorage) StoreTelemetry(data *proto.TelemetryData) error {
    s.mu.Lock()
    defer s.mu.Unlock()

    // Update Prometheus metrics
    labels := prometheus.Labels{
        "cluster_id": data.ClusterId,
        "version":    data.Version,
        "os":         data.Os,
    }

    s.volumeServerCount.With(labels).Set(float64(data.VolumeServerCount))
    s.totalDiskBytes.With(labels).Set(float64(data.TotalDiskBytes))
    s.totalVolumeCount.With(labels).Set(float64(data.TotalVolumeCount))
    s.filerCount.With(labels).Set(float64(data.FilerCount))
    s.brokerCount.With(labels).Set(float64(data.BrokerCount))

    infoLabels := prometheus.Labels{
        "cluster_id": data.ClusterId,
        "version":    data.Version,
        "os":         data.Os,
    }
    s.clusterInfo.With(infoLabels).Set(1)

    s.telemetryReceived.Inc()

    // Store in memory for API endpoints
    s.instances[data.ClusterId] = &telemetryData{
        TelemetryData: data,
        ReceivedAt:    time.Now().UTC(),
    }

    // Update aggregated stats
    s.updateStats()

    return nil
}

func (s *PrometheusStorage) GetStats() (map[string]interface{}, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Return cached stats
    result := make(map[string]interface{})
    for k, v := range s.stats {
        result[k] = v
    }
    return result, nil
}

func (s *PrometheusStorage) GetInstances(limit int) ([]*telemetryData, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    var instances []*telemetryData
    count := 0
    for _, instance := range s.instances {
        if count >= limit {
            break
        }
        instances = append(instances, instance)
        count++
    }

    return instances, nil
}

func (s *PrometheusStorage) GetMetrics(days int) (map[string]interface{}, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()

    // Return current metrics from in-memory storage
    // Historical data should be queried from Prometheus directly
    cutoff := time.Now().AddDate(0, 0, -days)

    var volumeServers []map[string]interface{}
    var diskUsage []map[string]interface{}

    for _, instance := range s.instances {
        if instance.ReceivedAt.After(cutoff) {
            volumeServers = append(volumeServers, map[string]interface{}{
                "date":  instance.ReceivedAt.Format("2006-01-02"),
                "value": instance.TelemetryData.VolumeServerCount,
            })
            diskUsage = append(diskUsage, map[string]interface{}{
                "date":  instance.ReceivedAt.Format("2006-01-02"),
                "value": instance.TelemetryData.TotalDiskBytes,
            })
        }
    }

    return map[string]interface{}{
        "volume_servers": volumeServers,
        "disk_usage":     diskUsage,
    }, nil
}

func (s *PrometheusStorage) updateStats() {
    now := time.Now()
    last7Days := now.AddDate(0, 0, -7)
    last30Days := now.AddDate(0, 0, -30)

    totalInstances := 0
    activeInstances := 0
    versions := make(map[string]int)
    osDistribution := make(map[string]int)

    for _, instance := range s.instances {
        if instance.ReceivedAt.After(last30Days) {
            totalInstances++
        }
        if instance.ReceivedAt.After(last7Days) {
            activeInstances++
            versions[instance.TelemetryData.Version]++
            osDistribution[instance.TelemetryData.Os]++
        }
    }

    // Update Prometheus gauges
    s.totalClusters.Set(float64(totalInstances))
    s.activeClusters.Set(float64(activeInstances))

    // Update cached stats for API
    s.stats = map[string]interface{}{
        "total_instances":  totalInstances,
        "active_instances": activeInstances,
        "versions":         versions,
        "os_distribution":  osDistribution,
    }
}

// CleanupOldInstances removes instances older than the specified duration
func (s *PrometheusStorage) CleanupOldInstances(maxAge time.Duration) {
    s.mu.Lock()
    defer s.mu.Unlock()

    cutoff := time.Now().Add(-maxAge)
    for instanceID, instance := range s.instances {
        if instance.ReceivedAt.Before(cutoff) {
            delete(s.instances, instanceID)

            // Remove from Prometheus metrics
            labels := prometheus.Labels{
                "cluster_id": instance.TelemetryData.ClusterId,
                "version":    instance.TelemetryData.Version,
                "os":         instance.TelemetryData.Os,
            }
            s.volumeServerCount.Delete(labels)
            s.totalDiskBytes.Delete(labels)
            s.totalVolumeCount.Delete(labels)
            s.filerCount.Delete(labels)
            s.brokerCount.Delete(labels)
        }
    }

    s.updateStats()
}
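The storage layer keeps two views of the same report: label-keyed Prometheus gauges for scraping, and an in-memory map for the JSON APIs. A minimal sketch of how the gauge side can be checked after a StoreTelemetry call; the test file is hypothetical, and note that promauto registers against the default registry, so NewPrometheusStorage can only run once per process:

package storage_test

import (
    "testing"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/seaweedfs/seaweedfs/telemetry/proto"
    "github.com/seaweedfs/seaweedfs/telemetry/server/storage"
)

// Sketch: StoreTelemetry should both cache the instance and publish
// the per-cluster gauges to the default registry.
func TestStoreTelemetryPublishesGauges(t *testing.T) {
    s := storage.NewPrometheusStorage()

    if err := s.StoreTelemetry(&proto.TelemetryData{
        ClusterId:         "test-cluster",
        Version:           "3.90",
        Os:                "linux/amd64",
        VolumeServerCount: 4,
    }); err != nil {
        t.Fatal(err)
    }

    // Gather from the default registry and look for the gauge family.
    mfs, err := prometheus.DefaultGatherer.Gather()
    if err != nil {
        t.Fatal(err)
    }
    found := false
    for _, mf := range mfs {
        if mf.GetName() == "seaweedfs_telemetry_volume_servers" {
            found = true
        }
    }
    if !found {
        t.Error("seaweedfs_telemetry_volume_servers not registered")
    }
}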
@@ -1,311 +0,0 @@
package main

import (
    "context"
    "fmt"
    "io"
    "log"
    "net/http"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
    "syscall"
    "time"

    "github.com/seaweedfs/seaweedfs/telemetry/proto"
    "github.com/seaweedfs/seaweedfs/weed/telemetry"
    protobuf "google.golang.org/protobuf/proto"
)

const (
    serverPort = "18080" // Use different port to avoid conflicts
    serverURL  = "http://localhost:" + serverPort
)

func main() {
    fmt.Println("🧪 Starting SeaweedFS Telemetry Integration Test")

    // Start telemetry server
    fmt.Println("📡 Starting telemetry server...")
    serverCmd, err := startTelemetryServer()
    if err != nil {
        log.Fatalf("❌ Failed to start telemetry server: %v", err)
    }
    defer stopServer(serverCmd)

    // Wait for server to start
    if !waitForServer(serverURL+"/health", 15*time.Second) {
        log.Fatal("❌ Telemetry server failed to start")
    }
    fmt.Println("✅ Telemetry server started successfully")

    // Test protobuf marshaling first
    fmt.Println("🔧 Testing protobuf marshaling...")
    if err := testProtobufMarshaling(); err != nil {
        log.Fatalf("❌ Protobuf marshaling test failed: %v", err)
    }
    fmt.Println("✅ Protobuf marshaling test passed")

    // Test protobuf client
    fmt.Println("🔄 Testing protobuf telemetry client...")
    if err := testTelemetryClient(); err != nil {
        log.Fatalf("❌ Telemetry client test failed: %v", err)
    }
    fmt.Println("✅ Telemetry client test passed")

    // Test server metrics endpoint
    fmt.Println("📊 Testing Prometheus metrics endpoint...")
    if err := testMetricsEndpoint(); err != nil {
        log.Fatalf("❌ Metrics endpoint test failed: %v", err)
    }
    fmt.Println("✅ Metrics endpoint test passed")

    // Test stats API
    fmt.Println("📈 Testing stats API...")
    if err := testStatsAPI(); err != nil {
        log.Fatalf("❌ Stats API test failed: %v", err)
    }
    fmt.Println("✅ Stats API test passed")

    // Test instances API
    fmt.Println("📋 Testing instances API...")
    if err := testInstancesAPI(); err != nil {
        log.Fatalf("❌ Instances API test failed: %v", err)
    }
    fmt.Println("✅ Instances API test passed")

    fmt.Println("🎉 All telemetry integration tests passed!")
}

func startTelemetryServer() (*exec.Cmd, error) {
    // Get the directory where this test is running
    testDir, err := os.Getwd()
    if err != nil {
        return nil, fmt.Errorf("failed to get working directory: %v", err)
    }

    // Navigate to the server directory (from main seaweedfs directory)
    serverDir := filepath.Join(testDir, "telemetry", "server")

    cmd := exec.Command("go", "run", ".",
        "-port="+serverPort,
        "-dashboard=false",
        "-cleanup=1m",
        "-max-age=1h")

    cmd.Dir = serverDir

    // Create log files for server output
    logFile, err := os.Create("telemetry-server-test.log")
    if err != nil {
        return nil, fmt.Errorf("failed to create log file: %v", err)
    }

    cmd.Stdout = logFile
    cmd.Stderr = logFile

    if err := cmd.Start(); err != nil {
        return nil, fmt.Errorf("failed to start server: %v", err)
    }

    return cmd, nil
}

func stopServer(cmd *exec.Cmd) {
    if cmd != nil && cmd.Process != nil {
        cmd.Process.Signal(syscall.SIGTERM)
        cmd.Wait()

        // Clean up log file
        os.Remove("telemetry-server-test.log")
    }
}

func waitForServer(url string, timeout time.Duration) bool {
    ctx, cancel := context.WithTimeout(context.Background(), timeout)
    defer cancel()

    fmt.Printf("⏳ Waiting for server at %s...\n", url)

    for {
        select {
        case <-ctx.Done():
            return false
        default:
            resp, err := http.Get(url)
            if err == nil {
                resp.Body.Close()
                if resp.StatusCode == http.StatusOK {
                    return true
                }
            }
            time.Sleep(500 * time.Millisecond)
        }
    }
}

func testProtobufMarshaling() error {
    // Test protobuf marshaling/unmarshaling
    testData := &proto.TelemetryData{
        ClusterId:         "test-cluster-12345",
        Version:           "test-3.45",
        Os:                "linux/amd64",
        VolumeServerCount: 2,
        TotalDiskBytes:    1000000,
        TotalVolumeCount:  10,
        FilerCount:        1,
        BrokerCount:       1,
        Timestamp:         time.Now().Unix(),
    }

    // Marshal
    data, err := protobuf.Marshal(testData)
    if err != nil {
        return fmt.Errorf("failed to marshal protobuf: %v", err)
    }

    fmt.Printf(" Protobuf size: %d bytes\n", len(data))

    // Unmarshal
    testData2 := &proto.TelemetryData{}
    if err := protobuf.Unmarshal(data, testData2); err != nil {
        return fmt.Errorf("failed to unmarshal protobuf: %v", err)
    }

    // Verify data
    if testData2.ClusterId != testData.ClusterId {
        return fmt.Errorf("protobuf data mismatch: expected %s, got %s",
            testData.ClusterId, testData2.ClusterId)
    }

    if testData2.VolumeServerCount != testData.VolumeServerCount {
        return fmt.Errorf("volume server count mismatch: expected %d, got %d",
            testData.VolumeServerCount, testData2.VolumeServerCount)
    }

    return nil
}

func testTelemetryClient() error {
    // Create telemetry client
    client := telemetry.NewClient(serverURL+"/api/collect", true)

    // Create test data using protobuf format
    testData := &proto.TelemetryData{
        Version:           "test-3.45",
        Os:                "linux/amd64",
        VolumeServerCount: 3,
        TotalDiskBytes:    1073741824, // 1GB
        TotalVolumeCount:  50,
        FilerCount:        2,
        BrokerCount:       1,
        Timestamp:         time.Now().Unix(),
    }

    // Send telemetry data
    if err := client.SendTelemetry(testData); err != nil {
        return fmt.Errorf("failed to send telemetry: %v", err)
    }

    fmt.Printf(" Sent telemetry for cluster: %s\n", client.GetInstanceID())

    // Wait a bit for processing
    time.Sleep(2 * time.Second)

    return nil
}

func testMetricsEndpoint() error {
    resp, err := http.Get(serverURL + "/metrics")
    if err != nil {
        return fmt.Errorf("failed to get metrics: %v", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("metrics endpoint returned status %d", resp.StatusCode)
    }

    // Read response and check for expected metrics
    content, err := io.ReadAll(resp.Body)
    if err != nil {
        return fmt.Errorf("failed to read metrics response: %v", err)
    }

    contentStr := string(content)
    expectedMetrics := []string{
        "seaweedfs_telemetry_total_clusters",
        "seaweedfs_telemetry_active_clusters",
        "seaweedfs_telemetry_reports_received_total",
        "seaweedfs_telemetry_volume_servers",
        "seaweedfs_telemetry_disk_bytes",
        "seaweedfs_telemetry_volume_count",
        "seaweedfs_telemetry_filer_count",
        "seaweedfs_telemetry_broker_count",
    }

    for _, metric := range expectedMetrics {
        if !strings.Contains(contentStr, metric) {
            return fmt.Errorf("missing expected metric: %s", metric)
        }
    }

    // Check that we have at least one report received
    if !strings.Contains(contentStr, "seaweedfs_telemetry_reports_received_total 1") {
        fmt.Printf(" Warning: Expected at least 1 report received, metrics content:\n%s\n", contentStr)
    }

    fmt.Printf(" Found %d expected metrics\n", len(expectedMetrics))

    return nil
}

func testStatsAPI() error {
    resp, err := http.Get(serverURL + "/api/stats")
    if err != nil {
        return fmt.Errorf("failed to get stats: %v", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("stats API returned status %d", resp.StatusCode)
    }

    // Read and verify JSON response
    content, err := io.ReadAll(resp.Body)
    if err != nil {
        return fmt.Errorf("failed to read stats response: %v", err)
    }

    contentStr := string(content)
    if !strings.Contains(contentStr, "total_instances") {
        return fmt.Errorf("stats response missing total_instances field")
    }

    fmt.Printf(" Stats response: %s\n", contentStr)

    return nil
}

func testInstancesAPI() error {
    resp, err := http.Get(serverURL + "/api/instances?limit=10")
    if err != nil {
        return fmt.Errorf("failed to get instances: %v", err)
    }
    defer resp.Body.Close()

    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("instances API returned status %d", resp.StatusCode)
    }

    // Read response
    content, err := io.ReadAll(resp.Body)
    if err != nil {
        return fmt.Errorf("failed to read instances response: %v", err)
    }

    fmt.Printf(" Instances response length: %d bytes\n", len(content))

    return nil
}
@@ -1,13 +1,12 @@
package lock_manager

import (
    "sort"
    "sync"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/pb"
    "github.com/seaweedfs/seaweedfs/weed/util"
    "sort"
    "sync"
    "time"
)

type LockRingSnapshot struct {

@@ -23,7 +22,6 @@ type LockRing struct {
    lastCompactTime  time.Time
    snapshotInterval time.Duration
    onTakeSnapshot   func(snapshot []pb.ServerAddress)
    cleanupWg        sync.WaitGroup
}

func NewLockRing(snapshotInterval time.Duration) *LockRing {

@@ -89,9 +87,7 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {

    r.addOneSnapshot(servers)

    r.cleanupWg.Add(1)
    go func() {
        defer r.cleanupWg.Done()
        <-time.After(r.snapshotInterval)
        r.compactSnapshots()
    }()

@@ -100,9 +96,7 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {
func (r *LockRing) takeSnapshotWithDelayedCompaction() {
    r.doTakeSnapshot()

    r.cleanupWg.Add(1)
    go func() {
        defer r.cleanupWg.Done()
        <-time.After(r.snapshotInterval)
        r.compactSnapshots()
    }()

@@ -178,19 +172,6 @@ func (r *LockRing) GetSnapshot() (servers []pb.ServerAddress) {
    return r.snapshots[0].servers
}

// WaitForCleanup waits for all pending cleanup operations to complete
// This is useful for testing to ensure deterministic behavior
func (r *LockRing) WaitForCleanup() {
    r.cleanupWg.Wait()
}

// GetSnapshotCount safely returns the number of snapshots for testing
func (r *LockRing) GetSnapshotCount() int {
    r.RLock()
    defer r.RUnlock()
    return len(r.snapshots)
}

func hashKeyToServer(key string, servers []pb.ServerAddress) pb.ServerAddress {
    if len(servers) == 0 {
        return ""
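The cleanupWg additions above exist so that tests can wait deterministically for the delayed-compaction goroutines instead of sleeping and hoping. A minimal, self-contained sketch of the same pattern with hypothetical names, not the actual LockRing implementation:

package main

import (
    "fmt"
    "sync"
    "time"
)

// ring sketches the WaitGroup pattern: every delayed cleanup goroutine
// is tracked, so WaitForCleanup gives callers a deterministic point
// after which all compactions have run.
type ring struct {
    mu        sync.Mutex
    snapshots []string
    cleanupWg sync.WaitGroup
}

func (r *ring) takeSnapshot(s string, delay time.Duration) {
    r.mu.Lock()
    r.snapshots = append(r.snapshots, s)
    r.mu.Unlock()

    r.cleanupWg.Add(1)
    go func() {
        defer r.cleanupWg.Done()
        <-time.After(delay)
        r.compact()
    }()
}

func (r *ring) compact() {
    r.mu.Lock()
    defer r.mu.Unlock()
    if n := len(r.snapshots); n > 2 {
        r.snapshots = r.snapshots[n-2:] // keep the two most recent
    }
}

// WaitForCleanup blocks until all pending compactions finish.
func (r *ring) WaitForCleanup() { r.cleanupWg.Wait() }

func main() {
    r := &ring{}
    for i := 0; i < 5; i++ {
        r.takeSnapshot(fmt.Sprintf("s%d", i), 10*time.Millisecond)
    }
    r.WaitForCleanup()
    fmt.Println("snapshots after compaction:", r.snapshots)
}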
@@ -1,91 +1,43 @@
package lock_manager

import (
    "testing"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/pb"
    "github.com/stretchr/testify/assert"
    "testing"
    "time"
)

func TestAddServer(t *testing.T) {
    r := NewLockRing(100 * time.Millisecond)

    // Add servers
    r.AddServer("localhost:8080")
    assert.Equal(t, 1, len(r.snapshots))
    r.AddServer("localhost:8081")
    r.AddServer("localhost:8082")
    r.AddServer("localhost:8083")
    r.AddServer("localhost:8084")

    // Verify all servers are present
    servers := r.GetSnapshot()
    assert.Equal(t, 5, len(servers))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8080"))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8082"))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8084"))

    // Remove servers
    r.RemoveServer("localhost:8084")
    r.RemoveServer("localhost:8082")
    r.RemoveServer("localhost:8080")

    // Wait for all cleanup operations to complete
    r.WaitForCleanup()
    assert.Equal(t, 8, len(r.snapshots))

    // Verify only 2 servers remain (localhost:8081 and localhost:8083)
    servers = r.GetSnapshot()
    assert.Equal(t, 2, len(servers))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))

    // Verify cleanup has happened - wait for snapshot interval and check snapshots are compacted
    time.Sleep(110 * time.Millisecond)
    r.WaitForCleanup()

    // Verify snapshot history is cleaned up properly (should have at most 2 snapshots after compaction)
    snapshotCount := r.GetSnapshotCount()
    assert.LessOrEqual(t, snapshotCount, 2, "Snapshot history should be compacted")
    assert.Equal(t, 2, len(r.snapshots))
}

func TestLockRing(t *testing.T) {
    r := NewLockRing(100 * time.Millisecond)

    // Test initial snapshot
    r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081"})
    assert.Equal(t, 1, r.GetSnapshotCount())
    servers := r.GetSnapshot()
    assert.Equal(t, 2, len(servers))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8080"))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))

    // Add another server
    assert.Equal(t, 1, len(r.snapshots))
    r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082"})
    assert.Equal(t, 2, r.GetSnapshotCount())
    servers = r.GetSnapshot()
    assert.Equal(t, 3, len(servers))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8082"))

    // Wait for cleanup interval and add another server
    assert.Equal(t, 2, len(r.snapshots))
    time.Sleep(110 * time.Millisecond)
    r.WaitForCleanup()
    r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083"})
    assert.LessOrEqual(t, r.GetSnapshotCount(), 3)
    servers = r.GetSnapshot()
    assert.Equal(t, 4, len(servers))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))

    // Wait for cleanup and verify compaction
    assert.Equal(t, 3, len(r.snapshots))
    time.Sleep(110 * time.Millisecond)
    r.WaitForCleanup()
    assert.LessOrEqual(t, r.GetSnapshotCount(), 2, "Snapshots should be compacted")

    // Add final server
    assert.Equal(t, 2, len(r.snapshots))
    r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083", "localhost:8084"})
    servers = r.GetSnapshot()
    assert.Equal(t, 5, len(servers))
    assert.Contains(t, servers, pb.ServerAddress("localhost:8084"))
    assert.LessOrEqual(t, r.GetSnapshotCount(), 3)
    assert.Equal(t, 3, len(r.snapshots))
}
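The ring's purpose is to map each lock key onto one server from the current snapshot via hashKeyToServer, whose empty-server guard appears above. The real SeaweedFS hash function is not shown in this diff; the sketch below stands in FNV-1a mod N as a hypothetical placeholder to illustrate the shape of such a stable mapping:

package main

import (
    "fmt"
    "hash/fnv"
)

// hashKeyToServer sketches a stable key-to-server mapping: the same
// key always lands on the same server for a given snapshot.
func hashKeyToServer(key string, servers []string) string {
    if len(servers) == 0 {
        return ""
    }
    h := fnv.New32a()
    h.Write([]byte(key))
    return servers[int(h.Sum32())%len(servers)]
}

func main() {
    servers := []string{"localhost:8080", "localhost:8081", "localhost:8083"}
    fmt.Println(hashKeyToServer("/buckets/demo/file.txt", servers))
}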
@@ -115,10 +115,7 @@ func runBackup(cmd *Command, args []string) bool {
            return true
        }
    }

    ver := needle.Version(stats.Version)

    v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0)
    v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
    if err != nil {
        fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
        return true

@@ -145,7 +142,7 @@ func runBackup(cmd *Command, args []string) bool {
        fmt.Printf("Error destroying volume: %v\n", err)
    }
    // recreate an empty volume
    v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0)
    v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
    if err != nil {
        fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
        return true

@@ -41,7 +41,7 @@ func runCompact(cmd *Command, args []string) bool {
    preallocate := *compactVolumePreallocate * (1 << 20)

    vid := needle.VolumeId(*compactVolumeId)
    v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, needle.GetCurrentVersion(), 0, 0)
    v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0, 0)
    if err != nil {
        glog.Fatalf("Load Volume [ERROR] %s\n", err)
    }

@@ -193,13 +193,6 @@ func runFuse(cmd *Command, args []string) bool {
        } else {
            panic(fmt.Errorf("readOnly: %s", err))
        }
    case "disableXAttr":
        if parsed, err := strconv.ParseBool(parameter.value); err == nil {
            mountOptions.disableXAttr = &parsed
        } else {
            panic(fmt.Errorf("disableXAttr: %s", err))
        }
    case "cpuprofile":
        mountCpuProfile = &parameter.value
    case "memprofile":
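The disableXAttr case above follows the command's general mount-option style: each "-o" parameter is a key=value pair, and boolean options go through strconv.ParseBool so "true", "1", "T" and friends all work. A small self-contained sketch of that parsing shape, with hypothetical names:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// parseBoolOption splits one key=value mount option and parses the
// value as a boolean, mirroring the switch cases above.
func parseBoolOption(opt string) (name string, value bool, err error) {
    name, raw, ok := strings.Cut(opt, "=")
    if !ok {
        return name, false, fmt.Errorf("option %q is not key=value", opt)
    }
    value, err = strconv.ParseBool(raw)
    if err != nil {
        return name, false, fmt.Errorf("%s: %w", name, err)
    }
    return name, value, nil
}

func main() {
    name, v, err := parseBoolOption("disableXAttr=true")
    if err != nil {
        panic(err)
    }
    fmt.Printf("%s = %v\n", name, v)
}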
@@ -3,14 +3,13 @@ package command
import (
    "context"
    "fmt"
    "github.com/seaweedfs/seaweedfs/weed/util/version"
    "net/http"
    "os"
    "path"
    "strings"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/util/version"

    hashicorpRaft "github.com/hashicorp/raft"

    "slices"

@@ -61,8 +60,6 @@ type MasterOptions struct {
    electionTimeout  *time.Duration
    raftHashicorp    *bool
    raftBootstrap    *bool
    telemetryUrl     *string
    telemetryEnabled *bool
}

func init() {

@@ -90,8 +87,6 @@ func init() {
    m.electionTimeout = cmdMaster.Flag.Duration("electionTimeout", 10*time.Second, "election timeout of master servers")
    m.raftHashicorp = cmdMaster.Flag.Bool("raftHashicorp", false, "use hashicorp raft")
    m.raftBootstrap = cmdMaster.Flag.Bool("raftBootstrap", false, "Whether to bootstrap the Raft cluster")
    m.telemetryUrl = cmdMaster.Flag.String("telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics")
    m.telemetryEnabled = cmdMaster.Flag.Bool("telemetry", false, "enable telemetry reporting")
}

var cmdMaster = &Command{

@@ -116,11 +111,6 @@ func runMaster(cmd *Command, args []string) bool {
    util.LoadSecurityConfiguration()
    util.LoadConfiguration("master", false)

    // bind viper configuration to command line flags
    if v := util.GetViper().GetString("master.mdir"); v != "" {
        *m.metaFolder = v
    }

    grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)

    parent, _ := util.FullPath(*m.metaFolder).DirAndName()

@@ -336,7 +326,5 @@ func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOp
        DisableHttp:        *m.disableHttp,
        MetricsAddress:     *m.metricsAddress,
        MetricsIntervalSec: *m.metricsIntervalSec,
        TelemetryUrl:       *m.telemetryUrl,
        TelemetryEnabled:   *m.telemetryEnabled,
    }
}
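The runMaster hunk above binds a viper configuration key to a command-line flag: a value from master.toml wins over the flag default only when it is actually set. A minimal standalone sketch of the same pattern using spf13/viper directly; the config file name and key mirror the snippet above, everything else is an assumption:

package main

import (
    "flag"
    "fmt"

    "github.com/spf13/viper"
)

var metaFolder = flag.String("mdir", ".", "data directory for the master")

func main() {
    flag.Parse()

    // Read master.toml if present; a missing config is fine and the
    // flag default (or the user-supplied flag) stands.
    viper.SetConfigName("master")
    viper.AddConfigPath(".")
    _ = viper.ReadInConfig()

    // Config value overrides the flag, mirroring runMaster above.
    if v := viper.GetString("master.mdir"); v != "" {
        *metaFolder = v
    }
    fmt.Println("meta folder:", *metaFolder)
}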
@@ -1,5 +0,0 @@
package command

func checkMountPointAvailable(dir string) bool {
    return true
}

@@ -1,5 +1,5 @@
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd
//go:build !linux && !darwin
// +build !linux,!darwin

package command
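The hunks above extend the mount build constraints to freebsd and keep a stub file for every other platform. The paired-file pattern means callers never need platform switches: the same function name compiles to a real probe on supported systems and a no-op elsewhere. A sketch of the stub half, modeled directly on the deleted file above:

//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd

package command

// checkMountPointAvailable has a real implementation in the
// platform-specific sibling file; on unsupported platforms the
// stub simply reports success so the caller can proceed.
func checkMountPointAvailable(dir string) bool {
    // No mount probing available on this platform.
    return true
}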
@@ -1,11 +1,12 @@
//go:build linux || darwin || freebsd
// +build linux darwin freebsd
//go:build linux || darwin
// +build linux darwin

package command

import (
    "context"
    "fmt"
    "github.com/seaweedfs/seaweedfs/weed/util/version"
    "net"
    "net/http"
    "os"

@@ -16,8 +17,6 @@ import (
    "syscall"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/util/version"

    "github.com/hanwen/go-fuse/v2/fuse"
    "github.com/seaweedfs/seaweedfs/weed/glog"
    "github.com/seaweedfs/seaweedfs/weed/mount"

@@ -104,8 +104,6 @@ func init() {
    masterOptions.raftBootstrap = cmdServer.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
    masterOptions.heartbeatInterval = cmdServer.Flag.Duration("master.heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)")
    masterOptions.electionTimeout = cmdServer.Flag.Duration("master.electionTimeout", 10*time.Second, "election timeout of master servers")
    masterOptions.telemetryUrl = cmdServer.Flag.String("master.telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics")
    masterOptions.telemetryEnabled = cmdServer.Flag.Bool("master.telemetry", false, "enable telemetry reporting")

    filerOptions.filerGroup = cmdServer.Flag.String("filer.filerGroup", "", "share metadata with other filers in the same filerGroup")
    filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")

@@ -19,8 +19,5 @@ func runVersion(cmd *Command, args []string) bool {
    }

    fmt.Printf("version %s %s %s\n", version.Version(), runtime.GOOS, runtime.GOARCH)
    println()
    println("For enterprise users, please visit https://seaweedfs.com for SeaweedFS Enterprise Edition,")
    println("which has a self-healing storage format with better data protection.")
    return true
}
@@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
    if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") {
        // now the insert failed possibly due to duplication constraints
        sqlInsert = "falls back to update"
        glog.V(1).InfofCtx(ctx, "insert %s %s: %v", entry.FullPath, sqlInsert, err)
        glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err)
        res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
    }
    if err != nil {

@@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
        }
    }

    glog.V(4).InfofCtx(ctx, "delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
    glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
    res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
    if err != nil {
        return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)

@@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
        var name string
        var data []byte
        if err = rows.Scan(&name, &data); err != nil {
            glog.V(0).InfofCtx(ctx, "scan %s : %v", dirPath, err)
            glog.V(0).Infof("scan %s : %v", dirPath, err)
            return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
        }
        lastFileName = name

@@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
            FullPath: util.NewFullPath(string(dirPath), name),
        }
        if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
            glog.V(0).InfofCtx(ctx, "scan decode %s : %v", entry.FullPath, err)
            glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
            return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
        }

@@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
    }

    // now the insert failed possibly due to duplication constraints
    glog.V(1).InfofCtx(ctx, "kv insert falls back to update: %s", err)
    glog.V(1).Infof("kv insert falls back to update: %s", err)

    res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
    if err != nil {
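The recurring change through these filer stores is InfofCtx/ErrorfCtx versus the plain variants: the context-aware calls let request-scoped metadata flow into log lines across a whole call chain. A small standalone sketch of what such a shim can look like; the context key and helper names here are hypothetical, not glog's actual implementation:

package main

import (
    "context"
    "log"
)

type ctxKey string

const requestIDKey ctxKey = "request-id"

// infofCtx prefixes the log line with a request ID carried by ctx,
// which is the practical difference a ctx-aware Infof variant buys.
func infofCtx(ctx context.Context, format string, args ...interface{}) {
    prefix := ""
    if id, ok := ctx.Value(requestIDKey).(string); ok {
        prefix = "[" + id + "] "
    }
    log.Printf(prefix+format, args...)
}

func main() {
    ctx := context.WithValue(context.Background(), requestIDKey, "req-42")
    infofCtx(ctx, "insert %s falls back to update", "/buckets/demo/a.txt")
}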
@@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat
        if driver.IsNotFound(err) {
            return nil, filer_pb.ErrNotFound
        }
        glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
        glog.Errorf("find %s: %v", fullpath, err)
        return nil, filer_pb.ErrNotFound
    }
    if len(data.Meta) == 0 {

@@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP
    }
    _, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
    if err != nil && !driver.IsNotFound(err) {
        glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
        glog.Errorf("find %s: %v", fullpath, err)
        return fmt.Errorf("delete %s : %v", fullpath, err)
    }
    return nil

@@ -331,7 +331,7 @@ sort d.name asc
        converted := arrayToBytes(data.Meta)
        if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
            err = decodeErr
            glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
            glog.V(0).Infof("list %s : %v", entry.FullPath, err)
            break
        }

@@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
        return nil, filer.ErrKvNotFound
    }
    if err != nil {
        glog.ErrorfCtx(ctx, "kv get: %s %v", string(key), err)
        glog.Errorf("kv get: %s %v", string(key), err)
        return nil, filer.ErrKvNotFound
    }
    return arrayToBytes(model.Meta), nil

@@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
    _, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
    if err != nil {
        glog.ErrorfCtx(ctx, "kv del: %v", err)
        glog.Errorf("kv del: %v", err)
        return filer.ErrKvNotFound
    }
    return nil
@@ -4,9 +4,8 @@ import (
    "context"
    "errors"
    "fmt"
    "time"

    "github.com/gocql/gocql"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/filer"
    "github.com/seaweedfs/seaweedfs/weed/glog"

@@ -203,7 +202,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
        lastFileName = name
        if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
            err = decodeErr
            glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
            glog.V(0).Infof("list %s : %v", entry.FullPath, err)
            break
        }
        if !eachEntryFunc(entry) {

@@ -211,7 +210,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
        }
    }
    if err = iter.Close(); err != nil {
        glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
        glog.V(0).Infof("list iterator close: %v", err)
    }

    return lastFileName, err

@@ -4,9 +4,8 @@ import (
    "context"
    "errors"
    "fmt"
    "time"

    "github.com/gocql/gocql"
    "time"

    "github.com/seaweedfs/seaweedfs/weed/filer"
    "github.com/seaweedfs/seaweedfs/weed/glog"

@@ -203,7 +202,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
        lastFileName = name
        if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
            err = decodeErr
            glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
            glog.V(0).Infof("list %s : %v", entry.FullPath, err)
            break
        }
        if !eachEntryFunc(entry) {

@@ -211,7 +210,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
        }
    }
    if err = iter.Close(); err != nil {
        glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
        glog.V(0).Infof("list iterator close: %v", err)
    }

    return lastFileName, err
@@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
    }
    value, err := jsoniter.Marshal(esEntry)
    if err != nil {
        glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
        glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
        return fmt.Errorf("insert entry marshal %v", err)
    }
    _, err = store.client.Index().

@@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
        BodyJson(string(value)).
        Do(ctx)
    if err != nil {
        glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
        glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
        return fmt.Errorf("insert entry %v", err)
    }
    return nil

@@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
        err := jsoniter.Unmarshal(searchResult.Source, esEntry)
        return esEntry.Entry, err
    }
    glog.ErrorfCtx(ctx, "find entry(%s),%v.", string(fullpath), err)
    glog.Errorf("find entry(%s),%v.", string(fullpath), err)
    return nil, filer_pb.ErrNotFound
}

@@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
    if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
        return nil
    }
    glog.ErrorfCtx(ctx, "delete index(%s) %v.", index, err)
    glog.Errorf("delete index(%s) %v.", index, err)
    return err
}

@@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
            return nil
        }
    }
    glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err)
    glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
    return fmt.Errorf("delete entry %v", err)
}

func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
    _, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
        if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
            glog.ErrorfCtx(ctx, "elastic delete %s: %v.", entry.FullPath, err)
            glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
            return false
        }
        return true

@@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
    result := &elastic.SearchResult{}
    if (startFileName == "" && first) || inclusive {
        if result, err = store.search(ctx, index, parentId); err != nil {
            glog.ErrorfCtx(ctx, "search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
            glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
            return
        }
    } else {

@@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries(
        }
        after := weed_util.Md5String([]byte(fullPath))
        if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
            glog.ErrorfCtx(ctx, "searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
            glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
            return
        }
    }

@@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
            return nil
        }
    }
    glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err)
    glog.Errorf("delete key(id:%s) %v.", string(key), err)
    return fmt.Errorf("delete key %v", err)
}

@@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
            return esEntry.Value, nil
        }
    }
    glog.ErrorfCtx(ctx, "find key(%s),%v.", string(key), err)
    glog.Errorf("find key(%s),%v.", string(key), err)
    return value, filer.ErrKvNotFound
}

@@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
    esEntry := &ESKVEntry{value}
    val, err := jsoniter.Marshal(esEntry)
    if err != nil {
        glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err)
        glog.Errorf("insert key(%s) %v.", string(key), err)
        return fmt.Errorf("insert key %v", err)
    }
    _, err = store.client.Index().
@@ -4,11 +4,10 @@ import (
    "context"
    "crypto/tls"
    "fmt"
    "go.etcd.io/etcd/client/pkg/v3/transport"
    "strings"
    "time"

    "go.etcd.io/etcd/client/pkg/v3/transport"

    "go.etcd.io/etcd/client/v3"

    "github.com/seaweedfs/seaweedfs/weed/filer"

@@ -96,7 +95,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t
        return fmt.Errorf("error checking etcd connection: %s", err)
    }

    glog.V(0).InfofCtx(ctx, "connection to etcd has been successfully verified. etcd version: %s", resp.Version)
    glog.V(0).Infof("connection to etcd has been successfully verified. etcd version: %s", resp.Version)
    store.client = client

    return nil

@@ -209,7 +208,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
    }
    if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
        err = decodeErr
        glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
        glog.V(0).Infof("list %s : %v", entry.FullPath, err)
        break
    }
    if !eachEntryFunc(entry) {
@@ -106,7 +106,7 @@ func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.Lookup
func fetchWholeChunk(ctx context.Context, bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
    urlStrings, err := lookupFileIdFn(ctx, fileId)
    if err != nil {
        glog.ErrorfCtx(ctx, "operation LookupFileId %s failed, err: %v", fileId, err)
        glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
        return err
    }
    err = retriedStreamFetchChunkData(ctx, bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)

@@ -159,7 +159,7 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin
            break
        }
        if err != nil {
            glog.V(0).InfofCtx(ctx, "read %s failed, err: %v", urlString, err)
            glog.V(0).Infof("read %s failed, err: %v", urlString, err)
        } else {
            break
        }

@@ -169,7 +169,7 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin
        break
    }
    if err != nil && shouldRetry {
        glog.V(0).InfofCtx(ctx, "retry reading in %v", waitTime)
        glog.V(0).Infof("retry reading in %v", waitTime)
        time.Sleep(waitTime)
    } else {
        break
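retriedStreamFetchChunkData, glimpsed above, tries each replica URL in turn and sleeps a growing waitTime between full passes. A self-contained sketch of that retry shape; the backoff factor and cap here are assumptions, not the exact schedule SeaweedFS uses:

package main

import (
    "errors"
    "fmt"
    "time"
)

// retriedFetch tries every URL once per pass; between passes it
// sleeps a waitTime that grows until a cap, then gives up.
func retriedFetch(urls []string, fetch func(string) error) error {
    waitTime := time.Second
    var err error
    for waitTime < 10*time.Second {
        for _, u := range urls {
            if err = fetch(u); err == nil {
                return nil
            }
            fmt.Printf("read %s failed, err: %v\n", u, err)
        }
        fmt.Printf("retry reading in %v\n", waitTime)
        time.Sleep(waitTime)
        waitTime += waitTime / 2
    }
    return err
}

func main() {
    calls := 0
    err := retriedFetch([]string{"volA", "volB"}, func(u string) error {
        calls++
        if calls < 3 {
            return errors.New("transient")
        }
        return nil
    })
    fmt.Println("result:", err)
}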
@@ -220,19 +220,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
			}
		}

		glog.V(4).InfofCtx(ctx, "InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
		if err := f.Store.InsertEntry(ctx, entry); err != nil {
			glog.ErrorfCtx(ctx, "insert entry %s: %v", entry.FullPath, err)
			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
		}
	} else {
		if o_excl {
			glog.V(3).InfofCtx(ctx, "EEXIST: entry %s already exists", entry.FullPath)
			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
		}
		glog.V(4).InfofCtx(ctx, "UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
			glog.ErrorfCtx(ctx, "update entry %s: %v", entry.FullPath, err)
			glog.Errorf("update entry %s: %v", entry.FullPath, err)
			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
		}
	}

@@ -241,7 +241,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr

	f.deleteChunksIfNotNew(ctx, oldEntry, entry)

	glog.V(4).InfofCtx(ctx, "CreateEntry %s: created", entry.FullPath)
	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)

	return nil
}

@@ -256,7 +256,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
	// fmt.Printf("%d dirPath: %+v\n", level, dirPath)

	// check the store directly
	glog.V(4).InfofCtx(ctx, "find uncached directory: %s", dirPath)
	glog.V(4).Infof("find uncached directory: %s", dirPath)
	dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))

	// no such existing directory

@@ -291,11 +291,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
			},
		}

		glog.V(2).InfofCtx(ctx, "create directory: %s %v", dirPath, dirEntry.Mode)
		glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
		mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
		if mkdirErr != nil {
			if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil {
				glog.V(3).InfofCtx(ctx, "mkdir %s: %v", dirPath, mkdirErr)
				glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
				return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
			}
		} else {

@@ -305,7 +305,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
		}

	} else if !dirEntry.IsDirectory() {
		glog.ErrorfCtx(ctx, "CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
		glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
		return fmt.Errorf("%s is a file", dirPath)
	}

@@ -316,11 +316,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
	if oldEntry != nil {
		entry.Attr.Crtime = oldEntry.Attr.Crtime
		if oldEntry.IsDirectory() && !entry.IsDirectory() {
			glog.ErrorfCtx(ctx, "existing %s is a directory", oldEntry.FullPath)
			glog.Errorf("existing %s is a directory", oldEntry.FullPath)
			return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
		}
		if !oldEntry.IsDirectory() && entry.IsDirectory() {
			glog.ErrorfCtx(ctx, "existing %s is a file", oldEntry.FullPath)
			glog.Errorf("existing %s is a file", oldEntry.FullPath)
			return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
		}
	}
@@ -41,7 +41,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
			return nil
		})
		if err != nil {
			glog.V(2).InfofCtx(ctx, "delete directory %s: %v", p, err)
			glog.V(2).Infof("delete directory %s: %v", p, err)
			return fmt.Errorf("delete directory %s: %v", p, err)
		}
	}

@@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
	for {
		entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
		if err != nil {
			glog.ErrorfCtx(ctx, "list folder %s: %v", entry.FullPath, err)
			glog.Errorf("list folder %s: %v", entry.FullPath, err)
			return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
		}
		if lastFileName == "" && !isRecursive && len(entries) > 0 {
			// only for first iteration in the loop
			glog.V(2).InfofCtx(ctx, "deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
			glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
			return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
		}

@@ -110,7 +110,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
		}
	}

	glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
	glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)

	if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
		return fmt.Errorf("filer store delete: %v", storeDeletionErr)

@@ -124,7 +124,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry

func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {

	glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
	glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)

	if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
		return fmt.Errorf("filer store delete: %v", storeDeletionErr)

@@ -153,7 +153,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
func (f *Filer) maybeDeleteHardLinks(ctx context.Context, hardLinkIds []HardLinkId) {
	for _, hardLinkId := range hardLinkIds {
		if err := f.Store.DeleteHardLink(ctx, hardLinkId); err != nil {
			glog.ErrorfCtx(ctx, "delete hard link id %d : %v", hardLinkId, err)
			glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
		}
	}
}

@@ -93,7 +93,7 @@ func (f *Filer) doDeleteChunks(ctx context.Context, chunks []*filer_pb.FileChunk
		}
		dataChunks, manifestResolveErr := ResolveOneChunkManifest(ctx, f.MasterClient.LookupFileId, chunk)
		if manifestResolveErr != nil {
			glog.V(0).InfofCtx(ctx, "failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
		}
		for _, dChunk := range dataChunks {
			f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())

@@ -119,7 +119,7 @@ func (f *Filer) deleteChunksIfNotNew(ctx context.Context, oldEntry, newEntry *En

	toDelete, err := MinusChunks(ctx, f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
	if err != nil {
		glog.ErrorfCtx(ctx, "Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
		glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
		return
	}
	f.DeleteChunksNotRecursive(toDelete)
@@ -4,7 +4,6 @@ import (
	"bytes"
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/glog"
	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
)

@@ -32,7 +31,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry

	// remove old hard link
	if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 {
		glog.V(4).InfofCtx(ctx, "handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
		glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
		if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
			return err
		}

@@ -51,7 +50,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err
		return encodeErr
	}

	glog.V(4).InfofCtx(ctx, "setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
	glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)

	return fsw.KvPut(ctx, key, newBlob)
}

@@ -64,16 +63,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr

	value, err := fsw.KvGet(ctx, key)
	if err != nil {
		glog.ErrorfCtx(ctx, "read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
		glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
		return err
	}

	if err = entry.DecodeAttributesAndChunks(value); err != nil {
		glog.ErrorfCtx(ctx, "decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
		glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
		return err
	}

	glog.V(4).InfofCtx(ctx, "maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
	glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)

	return nil
}

@@ -95,7 +94,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har

	entry.HardLinkCounter--
	if entry.HardLinkCounter <= 0 {
		glog.V(4).InfofCtx(ctx, "DeleteHardLink KvDelete %v", key)
		glog.V(4).Infof("DeleteHardLink KvDelete %v", key)
		return fsw.KvDelete(ctx, key)
	}

@@ -104,7 +103,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
		return encodeErr
	}

	glog.V(4).InfofCtx(ctx, "DeleteHardLink KvPut %v", key)
	glog.V(4).Infof("DeleteHardLink KvPut %v", key)
	return fsw.KvPut(ctx, key, newBlob)

}

@@ -192,7 +192,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
		// remove hard link
		op := ctx.Value("OP")
		if op != "MV" {
			glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
			glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
			if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
				return err
			}

@@ -215,7 +215,7 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry
		// remove hard link
		op := ctx.Value("OP")
		if op != "MV" {
			glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
			glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
			if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
				return err
			}

@@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
		}
		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if !eachEntryFunc(entry) {
@@ -4,14 +4,13 @@ import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"

	"github.com/syndtr/goleveldb/leveldb"
	leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/opt"
	leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
	"io"
	"os"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"

@@ -206,7 +205,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
		}
		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if !eachEntryFunc(entry) {

@@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if !eachEntryFunc(entry) {

@@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if !eachEntryFunc(entry) {
@@ -187,7 +187,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
	var where = bson.M{"directory": dir, "name": name}
	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
	if err != mongo.ErrNoDocuments && err != nil {
		glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
		glog.Errorf("find %s: %v", fullpath, err)
		return nil, filer_pb.ErrNotFound
	}

@@ -234,22 +234,14 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
		"directory": string(dirPath),
	}

	nameQuery := bson.M{}

	if len(prefix) > 0 {
		nameQuery["$regex"] = "^" + regexp.QuoteMeta(prefix)
		where["name"].(bson.M)["$regex"] = "^" + regexp.QuoteMeta(prefix)
	}

	if len(startFileName) > 0 {
		if includeStartFile {
			nameQuery["$gte"] = startFileName
		} else {
			nameQuery["$gt"] = startFileName
		}
	}

	if len(nameQuery) > 0 {
		where["name"] = nameQuery
		if includeStartFile {
			where["name"].(bson.M)["$gte"] = startFileName
		} else {
			where["name"].(bson.M)["$gt"] = startFileName
		}

	optLimit := int64(limit)

@@ -272,7 +264,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
		lastFileName = data.Name
		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}

@@ -283,7 +275,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
	}

	if err := cur.Close(ctx); err != nil {
		glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
		glog.V(0).Infof("list iterator close: %v", err)
	}

	return lastFileName, err

@@ -3,7 +3,6 @@ package mongodb
import (
	"context"
	"fmt"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/glog"
	"go.mongodb.org/mongo-driver/bson"

@@ -38,7 +37,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
	var where = bson.M{"directory": dir, "name": name}
	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
	if err != mongo.ErrNoDocuments && err != nil {
		glog.ErrorfCtx(ctx, "kv get: %v", err)
		glog.Errorf("kv get: %v", err)
		return nil, filer.ErrKvNotFound
	}

@@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp

		locations = resp.LocationsMap[vid]
		if locations == nil || len(locations.Locations) == 0 {
			glog.V(0).InfofCtx(ctx, "failed to locate %s", fileId)
			glog.V(0).Infof("failed to locate %s", fileId)
			return fmt.Errorf("failed to locate %s", fileId)
		}
		vicCacheLock.Lock()
@@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
			entry, err := store.FindEntry(ctx, path)
			lastFileName = fileName
			if err != nil {
				glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
				glog.V(0).Infof("list %s : %v", path, err)
				if err == filer_pb.ErrNotFound {
					continue
				}

@@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir
			entry, err := store.FindEntry(ctx, path)
			lastFileName = fileName
			if err != nil {
				glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
				glog.V(0).Infof("list %s : %v", path, err)
				if err == filer_pb.ErrNotFound {
					continue
				}

@@ -3,7 +3,6 @@ package redis3
import (
	"context"
	"fmt"

	"github.com/redis/go-redis/v9"
	"github.com/seaweedfs/seaweedfs/weed/glog"
)

@@ -32,7 +31,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri
	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

	if err := nameList.WriteName(name); err != nil {
		glog.ErrorfCtx(ctx, "add %s %s: %v", key, name, err)
		glog.Errorf("add %s %s: %v", key, name, err)
		return err
	}

@@ -101,7 +100,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s

	if err = nameList.ListNames("", func(name string) bool {
		if err := onDeleteFn(name); err != nil {
			glog.ErrorfCtx(ctx, "delete %s child %s: %v", key, name, err)
			glog.Errorf("delete %s child %s: %v", key, name, err)
			return false
		}
		return true

@@ -151,7 +151,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir
		entry, err := store.FindEntry(ctx, path)
		lastFileName = fileName
		if err != nil {
			glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
			glog.V(0).Infof("list %s : %v", path, err)
			if err == filer_pb.ErrNotFound {
				return true
			}

@@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d
			entry, err := store.FindEntry(ctx, path)
			lastFileName = fileName
			if err != nil {
				glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
				glog.V(0).Infof("list %s : %v", path, err)
				if err == filer_pb.ErrNotFound {
					continue
				}
@@ -169,7 +169,7 @@ func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath we

	iter := store.db.NewIterator(ro)
	defer iter.Close()
	err = enumerate(iter, directoryPrefix, nil, false, -1, "", func(key, value []byte) bool {
	err = enumerate(iter, directoryPrefix, nil, false, -1, func(key, value []byte) bool {
		batch.Delete(key)
		return true
	})

@@ -186,16 +186,23 @@ func (store *RocksDBStore) DeleteFolderChildren(ctx context.Context, fullpath we
	return nil
}

func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, startFileName string, fn func(key, value []byte) bool) (err error) {
func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey bool, limit int64, fn func(key, value []byte) bool) (err error) {

	if len(lastKey) == 0 {
		iter.Seek(prefix)
	} else {
		iter.Seek(lastKey)
		if !includeLastKey {
			if iter.Valid() {
				if bytes.Equal(iter.Key().Data(), lastKey) {
					iter.Next()
				}
			}
		}
	}

	i := int64(0)
	for iter.Valid() {
	for ; iter.Valid(); iter.Next() {

		if limit > 0 {
			i++

@@ -210,23 +217,12 @@ func enumerate(iter *gorocksdb.Iterator, prefix, lastKey []byte, includeLastKey
			break
		}

		fileName := getNameFromKey(key)
		if fileName == "" {
			iter.Next()
			continue
		}
		if fileName == startFileName && !includeLastKey {
			iter.Next()
			continue
		}

		ret := fn(key, iter.Value().Data())

		if !ret {
			break
		}

		iter.Next()
	}

	if err := iter.Err(); err != nil {

@@ -253,7 +249,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir

	iter := store.db.NewIterator(ro)
	defer iter.Close()
	err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, startFileName, func(key, value []byte) bool {
	err = enumerate(iter, directoryPrefix, lastFileStart, includeStartFile, limit, func(key, value []byte) bool {
		fileName := getNameFromKey(key)
		if fileName == "" {
			return true

@@ -266,7 +262,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
		if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			return false
		}
		if !eachEntryFunc(entry) {

@@ -11,7 +11,6 @@ import (
	"time"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/pb"
	"github.com/seaweedfs/seaweedfs/weed/util"
)

@@ -113,73 +112,3 @@ func BenchmarkInsertEntry(b *testing.B) {
		store.InsertEntry(ctx, entry)
	}
}

func TestListDirectoryWithPrefix(t *testing.T) {
	testFiler := filer.NewFiler(pb.ServerDiscovery{}, nil, "", "", "", "", "", 255, nil)
	dir := t.TempDir()
	store := &RocksDBStore{}
	store.initialize(dir)
	testFiler.SetStore(store)

	ctx := context.Background()

	files := []string{
		"/bucket1/test-prefix1/file1.txt",
		"/bucket1/test-prefix1/file2.txt",
		"/bucket1/test-prefix1-extra.txt",
	}

	expected1 := []string{
		"/bucket1/test-prefix1",
		"/bucket1/test-prefix1-extra.txt",
	}

	expected2 := []string{
		"/bucket1/test-prefix1/file1.txt",
		"/bucket1/test-prefix1/file2.txt",
	}

	for _, file := range files {
		fullpath := util.FullPath(file)
		entry := &filer.Entry{
			FullPath: fullpath,
			Attr: filer.Attr{
				Mode: 0644,
				Uid:  1,
				Gid:  1,
			},
		}
		if err := testFiler.CreateEntry(ctx, entry, false, false, nil, false, testFiler.MaxFilenameLength); err != nil {
			t.Fatalf("Failed to create entry %s: %v", fullpath, err)
		}
	}

	prefix1 := "test-prefix1"
	entries1, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/bucket1"), "", false, 100, prefix1, "", "")
	if err != nil {
		t.Fatalf("Failed to list entries with prefix %s: %v", prefix1, err)
	}
	if len(entries1) != 2 {
		t.Errorf("Expected 2 entries with prefix %s, got %d", prefix1, len(entries1))
	} else {
		t.Logf("Found %d entries with prefix %s", len(entries1), prefix1)
	}
	for i, entry := range entries1 {
		if string(entry.FullPath) != expected1[i] {
			t.Errorf("Expected entry %s, got %s", expected1[i], entry.FullPath)
		}
	}

	entries2, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/bucket1/test-prefix1"), "", false, 100, "", "", "")
	if err != nil {
		t.Fatalf("Failed to list entries with prefix %s: %v", prefix1, err)
	}
	if len(entries2) != 2 {
		t.Errorf("Expected 2 entries with prefix %s, got %d", prefix1, len(entries1))
	}
	for i, entry := range entries2 {
		if string(entry.FullPath) != expected2[i] {
			t.Errorf("Expected entry %s, got %s", expected2[i], entry.FullPath)
		}
	}
}
@@ -82,7 +82,7 @@ func noJwtFunc(string) string {
}

func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
	glog.V(4).InfofCtx(ctx, "prepare to stream content for chunks: %d", len(chunks))
	glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks))
	chunkViews := ViewFromChunks(ctx, masterClient.GetLookupFileIdFunction(), chunks, offset, size)

	fileId2Url := make(map[string][]string)

@@ -96,15 +96,15 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
			if err == nil && len(urlStrings) > 0 {
				break
			}
			glog.V(4).InfofCtx(ctx, "waiting for chunk: %s", chunkView.FileId)
			glog.V(4).Infof("waiting for chunk: %s", chunkView.FileId)
			time.Sleep(backoff)
		}
		if err != nil {
			glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
			return nil, err
		} else if len(urlStrings) == 0 {
			errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
			glog.ErrorCtx(ctx, errUrlNotFound)
			glog.Error(errUrlNotFound)
			return nil, errUrlNotFound
		}
		fileId2Url[chunkView.FileId] = urlStrings

@@ -118,7 +118,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
			if offset < chunkView.ViewOffset {
				gap := chunkView.ViewOffset - offset
				remaining -= gap
				glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, chunkView.ViewOffset)
				glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
				err := writeZero(writer, gap)
				if err != nil {
					return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset)

@@ -140,7 +140,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
			downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))
		}
		if remaining > 0 {
			glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, offset+remaining)
			glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining)
			err := writeZero(writer, remaining)
			if err != nil {
				return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining)

@@ -192,7 +192,7 @@ func ReadAll(ctx context.Context, buffer []byte, masterClient *wdclient.MasterCl
		chunkView := x.Value
		urlStrings, err := lookupFileIdFn(ctx, chunkView.FileId)
		if err != nil {
			glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
			return err
		}
@@ -260,39 +260,39 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
	}

	if len(results) < 1 {
		glog.ErrorfCtx(ctx, "Can't find results, data is empty")
		glog.Errorf("Can't find results, data is empty")
		return
	}

	rows, ok := results[0].([]interface{})
	if !ok {
		glog.ErrorfCtx(ctx, "Can't convert results[0] to list")
		glog.Errorf("Can't convert results[0] to list")
		return
	}

	for _, result := range rows {
		row, ok := result.([]interface{})
		if !ok {
			glog.ErrorfCtx(ctx, "Can't convert result to list")
			glog.Errorf("Can't convert result to list")
			return
		}

		if len(row) < 5 {
			glog.ErrorfCtx(ctx, "Length of result is less than needed: %v", len(row))
			glog.Errorf("Length of result is less than needed: %v", len(row))
			return
		}

		nameRaw := row[2]
		name, ok := nameRaw.(string)
		if !ok {
			glog.ErrorfCtx(ctx, "Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
			glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
			return
		}

		dataRaw := row[4]
		data, ok := dataRaw.(string)
		if !ok {
			glog.ErrorfCtx(ctx, "Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
			glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
			return
		}

@@ -302,7 +302,7 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
		lastFileName = name
		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if !eachEntryFunc(entry) {

@@ -249,7 +249,7 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil {
			err = decodeErr
			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
			break
		}
		if err := iter.Next(); !eachEntryFunc(entry) || err != nil {
@@ -22,21 +22,19 @@ const (
	deleteQuery = `
	PRAGMA TablePathPrefix("%v");
	DECLARE $dir_hash AS int64;
	DECLARE $directory AS Utf8;
	DECLARE $name AS Utf8;

	DELETE FROM ` + asql.DEFAULT_TABLE + `
	WHERE dir_hash = $dir_hash AND directory = $directory AND name = $name;`
	WHERE dir_hash = $dir_hash AND name = $name;`

	findQuery = `
	PRAGMA TablePathPrefix("%v");
	DECLARE $dir_hash AS int64;
	DECLARE $directory AS Utf8;
	DECLARE $name AS Utf8;

	SELECT meta
	FROM ` + asql.DEFAULT_TABLE + `
	WHERE dir_hash = $dir_hash AND directory = $directory AND name = $name;`
	WHERE dir_hash = $dir_hash AND name = $name;`

	deleteFolderChildrenQuery = `
	PRAGMA TablePathPrefix("%v");
@@ -12,9 +12,6 @@ import (
	"sync"
	"time"

	"github.com/ydb-platform/ydb-go-sdk/v3/query"
	"github.com/ydb-platform/ydb-go-sdk/v3/table/options"

	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
	"github.com/seaweedfs/seaweedfs/weed/glog"

@@ -23,37 +20,30 @@ import (
	environ "github.com/ydb-platform/ydb-go-sdk-auth-environ"
	"github.com/ydb-platform/ydb-go-sdk/v3"
	"github.com/ydb-platform/ydb-go-sdk/v3/table"
	"github.com/ydb-platform/ydb-go-sdk/v3/table/result"
	"github.com/ydb-platform/ydb-go-sdk/v3/table/result/named"
	"github.com/ydb-platform/ydb-go-sdk/v3/table/types"
)

const (
	defaultDialTimeOut = 10
	defaultPartitionBySizeEnabled = true
	defaultPartitionSizeMb = 200
	defaultPartitionByLoadEnabled = true
	defaultMinPartitionsCount = 5
	defaultMaxPartitionsCount = 1000
	defaultMaxListChunk = 2000
	defaultDialTimeOut = 10
)

var (
	roQC = query.WithTxControl(query.OnlineReadOnlyTxControl())
	rwQC = query.WithTxControl(query.DefaultTxControl())
	roTX = table.TxControl(
		table.BeginTx(table.WithOnlineReadOnly()),
		table.CommitTx(),
	)
	rwTX = table.DefaultTxControl()
)

type YdbStore struct {
	DB *ydb.Driver
	dirBuckets string
	tablePathPrefix string
	SupportBucketTable bool
	partitionBySizeEnabled options.FeatureFlag
	partitionSizeMb uint64
	partitionByLoadEnabled options.FeatureFlag
	minPartitionsCount uint64
	maxPartitionsCount uint64
	maxListChunk int
	dbs map[string]bool
	dbsLock sync.Mutex
	DB ydb.Connection
	dirBuckets string
	tablePathPrefix string
	SupportBucketTable bool
	dbs map[string]bool
	dbsLock sync.Mutex
}

func init() {

@@ -65,12 +55,6 @@ func (store *YdbStore) GetName() string {
}

func (store *YdbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
	configuration.SetDefault(prefix+"partitionBySizeEnabled", defaultPartitionBySizeEnabled)
	configuration.SetDefault(prefix+"partitionSizeMb", defaultPartitionSizeMb)
	configuration.SetDefault(prefix+"partitionByLoadEnabled", defaultPartitionByLoadEnabled)
	configuration.SetDefault(prefix+"minPartitionsCount", defaultMinPartitionsCount)
	configuration.SetDefault(prefix+"maxPartitionsCount", defaultMaxPartitionsCount)
	configuration.SetDefault(prefix+"maxListChunk", defaultMaxListChunk)
	return store.initialize(
		configuration.GetString("filer.options.buckets_folder"),
		configuration.GetString(prefix+"dsn"),

@@ -78,37 +62,18 @@ func (store *YdbStore) Initialize(configuration util.Configuration, prefix strin
		configuration.GetBool(prefix+"useBucketPrefix"),
		configuration.GetInt(prefix+"dialTimeOut"),
		configuration.GetInt(prefix+"poolSizeLimit"),
		configuration.GetBool(prefix+"partitionBySizeEnabled"),
		uint64(configuration.GetInt(prefix+"partitionSizeMb")),
		configuration.GetBool(prefix+"partitionByLoadEnabled"),
		uint64(configuration.GetInt(prefix+"minPartitionsCount")),
		uint64(configuration.GetInt(prefix+"maxPartitionsCount")),
		configuration.GetInt(prefix+"maxListChunk"),
	)
}

func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix string, useBucketPrefix bool, dialTimeOut int, poolSizeLimit int, partitionBySizeEnabled bool, partitionSizeMb uint64, partitionByLoadEnabled bool, minPartitionsCount uint64, maxPartitionsCount uint64, maxListChunk int) (err error) {
func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix string, useBucketPrefix bool, dialTimeOut int, poolSizeLimit int) (err error) {
	store.dirBuckets = dirBuckets
	store.SupportBucketTable = useBucketPrefix
	if partitionBySizeEnabled {
		store.partitionBySizeEnabled = options.FeatureEnabled
	} else {
		store.partitionBySizeEnabled = options.FeatureDisabled
	}
	if partitionByLoadEnabled {
		store.partitionByLoadEnabled = options.FeatureEnabled
	} else {
		store.partitionByLoadEnabled = options.FeatureDisabled
	}
	store.partitionSizeMb = partitionSizeMb
	store.minPartitionsCount = minPartitionsCount
	store.maxPartitionsCount = maxPartitionsCount
	store.maxListChunk = maxListChunk
	if store.SupportBucketTable {
		glog.V(0).Infof("enabled BucketPrefix")
	}
	store.dbs = make(map[string]bool)
	ctx := context.Background()
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if dialTimeOut == 0 {
		dialTimeOut = defaultDialTimeOut
	}

@@ -124,7 +89,11 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
	}
	store.DB, err = ydb.Open(ctx, dsn, opts...)
	if err != nil {
		return fmt.Errorf("can not connect to %s: %w", dsn, err)
		if store.DB != nil {
			_ = store.DB.Close(ctx)
			store.DB = nil
		}
		return fmt.Errorf("can not connect to %s error: %v", dsn, err)
	}

	store.tablePathPrefix = path.Join(store.DB.Name(), tablePathPrefix)

@@ -135,27 +104,29 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
	return err
}

func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.QueryParameters, ts query.ExecuteOption, processResultFunc func(res query.Result) error) (err error) {
	var res query.Result
	if tx, ok := ctx.Value("tx").(query.Transaction); ok {
		res, err = tx.Query(ctx, *q, query.WithParameters(params))
func (store *YdbStore) doTxOrDB(ctx context.Context, query *string, params *table.QueryParameters, tc *table.TransactionControl, processResultFunc func(res result.Result) error) (err error) {
	var res result.Result
	if tx, ok := ctx.Value("tx").(table.Transaction); ok {
		res, err = tx.Execute(ctx, *query, params)
		if err != nil {
			return fmt.Errorf("execute transaction: %v", err)
		}
	} else {
		err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
			res, err = s.Query(ctx, *q, query.WithParameters(params), ts)
		err = store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) {
			_, res, err = s.Execute(ctx, tc, *query, params)
			if err != nil {
				return fmt.Errorf("execute statement: %v", err)
			}
			return nil
		}, query.WithIdempotent())
		},
			table.WithIdempotent(),
		)
	}
	if err != nil {
		return err
	}
	if res != nil {
		defer func() { _ = res.Close(ctx) }()
		defer func() { _ = res.Close() }()
		if processResultFunc != nil {
			if err = processResultFunc(res); err != nil {
				return fmt.Errorf("process result: %v", err)

@@ -177,7 +148,7 @@ func (store *YdbStore) insertOrUpdateEntry(ctx context.Context, entry *filer.Ent
	}
	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
	fileMeta := FileMeta{util.HashStringToLong(dir), name, *shortDir, meta}
	return store.doTxOrDB(ctx, withPragma(tablePathPrefix, upsertQuery), fileMeta.queryParameters(entry.TtlSec), rwQC, nil)
	return store.doTxOrDB(ctx, withPragma(tablePathPrefix, upsertQuery), fileMeta.queryParameters(entry.TtlSec), rwTX, nil)
}

func (store *YdbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {

@@ -193,29 +164,23 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e
	var data []byte
	entryFound := false
	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
	q := withPragma(tablePathPrefix, findQuery)
	query := withPragma(tablePathPrefix, findQuery)
	queryParams := table.NewQueryParameters(
		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
		table.ValueParam("$directory", types.UTF8Value(*shortDir)),
		table.ValueParam("$name", types.UTF8Value(name)))

	err = store.doTxOrDB(ctx, q, queryParams, roQC, func(res query.Result) error {
		for rs, err := range res.ResultSets(ctx) {
			if err != nil {
				return err
			}
			for row, err := range rs.Rows(ctx) {
				if err != nil {
					return err
				}
				if scanErr := row.Scan(&data); scanErr != nil {
					return fmt.Errorf("scan %s: %v", fullpath, scanErr)
				}
				entryFound = true
				return nil
			}
	err = store.doTxOrDB(ctx, query, queryParams, roTX, func(res result.Result) error {
		if !res.NextResultSet(ctx) || !res.HasNextRow() {
			return nil
		}
		return nil
		for res.NextRow() {
			if err = res.ScanNamed(named.OptionalWithDefault("meta", &data)); err != nil {
				return fmt.Errorf("scanNamed %s : %v", fullpath, err)
			}
			entryFound = true
			return nil
		}
		return res.Err()
	})
	if err != nil {
		return nil, err

@@ -224,35 +189,37 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e
		return nil, filer_pb.ErrNotFound
	}

	entry = &filer.Entry{FullPath: fullpath}
	if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
		return nil, fmt.Errorf("decode %s: %v", fullpath, decodeErr)
	entry = &filer.Entry{
		FullPath: fullpath,
	}
	if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
		return nil, fmt.Errorf("decode %s : %v", fullpath, err)
	}

	return entry, nil
}

func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
	dir, name := fullpath.DirAndName()
	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
	q := withPragma(tablePathPrefix, deleteQuery)
	glog.V(4).InfofCtx(ctx, "DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
	query := withPragma(tablePathPrefix, deleteQuery)
	glog.V(4).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
	queryParams := table.NewQueryParameters(
		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
		table.ValueParam("$directory", types.UTF8Value(*shortDir)),
		table.ValueParam("$name", types.UTF8Value(name)))

	return store.doTxOrDB(ctx, q, queryParams, rwQC, nil)
	return store.doTxOrDB(ctx, query, queryParams, rwTX, nil)
}

func (store *YdbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
	dir := string(fullpath)
	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
	q := withPragma(tablePathPrefix, deleteFolderChildrenQuery)
	query := withPragma(tablePathPrefix, deleteFolderChildrenQuery)
	queryParams := table.NewQueryParameters(
		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
		table.ValueParam("$directory", types.UTF8Value(*shortDir)))

	return store.doTxOrDB(ctx, q, queryParams, rwQC, nil)
	return store.doTxOrDB(ctx, query, queryParams, rwTX, nil)
}

func (store *YdbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {

@@ -262,79 +229,71 @@ func (store *YdbStore) ListDirectoryEntries(ctx context.Context, dirPath util.Fu
func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
	dir := string(dirPath)
	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
	baseInclusive := withPragma(tablePathPrefix, listInclusiveDirectoryQuery)
	baseExclusive := withPragma(tablePathPrefix, listDirectoryQuery)
	var entryCount int64
	var prevFetchedLessThanChunk bool
	for entryCount < limit {
		if prevFetchedLessThanChunk {
			break
	var query *string
	if includeStartFile {
		query = withPragma(tablePathPrefix, listInclusiveDirectoryQuery)
	} else {
		query = withPragma(tablePathPrefix, listDirectoryQuery)
	}
	truncated := true
	eachEntryFuncIsNotBreake := true
	entryCount := int64(0)
	for truncated && eachEntryFuncIsNotBreake {
		if lastFileName != "" {
			startFileName = lastFileName
			if includeStartFile {
				query = withPragma(tablePathPrefix, listDirectoryQuery)
			}
		}
		var q *string
		if entryCount == 0 && includeStartFile {
			q = baseInclusive
		} else {
			q = baseExclusive
		restLimit := limit - entryCount
		const maxChunk = int64(1000)
		chunkLimit := restLimit
		if chunkLimit > maxChunk {
			chunkLimit = maxChunk
		}
		rest := limit - entryCount
		chunkLimit := rest
		if chunkLimit > int64(store.maxListChunk) {
			chunkLimit = int64(store.maxListChunk)
		}
		var rowCount int64
		glog.V(4).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit)

		params := table.NewQueryParameters(
		queryParams := table.NewQueryParameters(
			table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
			table.ValueParam("$directory", types.UTF8Value(*shortDir)),
			table.ValueParam("$start_name", types.UTF8Value(startFileName)),
			table.ValueParam("$prefix", types.UTF8Value(prefix+"%")),
			table.ValueParam("$limit", types.Uint64Value(uint64(chunkLimit))),
		)

		err := store.doTxOrDB(ctx, q, params, roQC, func(res query.Result) error {
			for rs, err := range res.ResultSets(ctx) {
				if err != nil {
					return err
				}
				for row, err := range rs.Rows(ctx) {
					if err != nil {
						return err
					}

					var name string
					var data []byte
					if scanErr := row.Scan(&name, &data); scanErr != nil {
						return fmt.Errorf("scan %s: %w", dir, scanErr)
					}

					lastFileName = name
					entry := &filer.Entry{FullPath: util.NewFullPath(dir, name)}
					if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
						return fmt.Errorf("decode entry %s: %w", entry.FullPath, decodeErr)
					}

					if !eachEntryFunc(entry) {
						return nil
					}

					rowCount++
					entryCount++
					startFileName = lastFileName

					if entryCount >= limit {
						return nil
					}
				}
		err = store.doTxOrDB(ctx, query, queryParams, roTX, func(res result.Result) error {
			var name string
			var data []byte
			if !res.NextResultSet(ctx) || !res.HasNextRow() {
				truncated = false
				return nil
			}
			return nil
			truncated = res.CurrentResultSet().Truncated()
			glog.V(4).Infof("truncated %v, entryCount %d", truncated, entryCount)
			for res.NextRow() {
				if err := res.ScanNamed(
					named.OptionalWithDefault("name", &name),
					named.OptionalWithDefault("meta", &data)); err != nil {
					return fmt.Errorf("list scanNamed %s : %v", dir, err)
				}
				glog.V(8).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name))
				lastFileName = name
				entry := &filer.Entry{
					FullPath: util.NewFullPath(dir, name),
				}
				if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
					return fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
				}
				if !eachEntryFunc(entry) {
					eachEntryFuncIsNotBreake = false
					break
				}
				entryCount += 1
			}
			return res.Err()
		})
		if err != nil {
			return lastFileName, err
		}

		if rowCount < chunkLimit {
			prevFetchedLessThanChunk = true
		}
	}
	if err != nil {
		return lastFileName, err
	}
	return lastFileName, nil
}

@@ -421,7 +380,7 @@ func (store *YdbStore) OnBucketDeletion(bucket string) {

func (store *YdbStore) createTable(ctx context.Context, prefix string) error {
	return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
		return s.CreateTable(ctx, path.Join(prefix, abstract_sql.DEFAULT_TABLE), store.createTableOptions()...)
		return s.CreateTable(ctx, path.Join(prefix, abstract_sql.DEFAULT_TABLE), createTableOptions()...)
	})
}

@@ -434,7 +393,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error {
	}); err != nil {
		return err
	}
	glog.V(4).InfofCtx(ctx, "deleted table %s", prefix)
	glog.V(4).Infof("deleted table %s", prefix)

	return nil
}

@@ -447,11 +406,11 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
	}

	prefixBuckets := store.dirBuckets + "/"
	glog.V(4).InfofCtx(ctx, "dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
	glog.V(4).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
	if strings.HasPrefix(*dir, prefixBuckets) {
		// detect bucket
		bucketAndDir := (*dir)[len(prefixBuckets):]
		glog.V(4).InfofCtx(ctx, "bucketAndDir: %s", bucketAndDir)
		glog.V(4).Infof("bucketAndDir: %s", bucketAndDir)
		var bucket string
		if t := strings.Index(bucketAndDir, "/"); t > 0 {
			bucket = bucketAndDir[:t]

@@ -465,22 +424,16 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
		store.dbsLock.Lock()
		defer store.dbsLock.Unlock()

		tablePathPrefixWithBucket := path.Join(store.tablePathPrefix, bucket)
		if _, found := store.dbs[bucket]; !found {
			glog.V(4).InfofCtx(ctx, "bucket %q not in cache, verifying existence via DescribeTable", bucket)
			tablePath := path.Join(store.tablePathPrefix, bucket, abstract_sql.DEFAULT_TABLE)
			err2 := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
				_, err3 := s.DescribeTable(ctx, tablePath)
				return err3
			})
			if err2 != nil {
				glog.V(4).InfofCtx(ctx, "bucket %q not found (DescribeTable %s failed)", bucket, tablePath)
				return
			if err := store.createTable(ctx, tablePathPrefixWithBucket); err == nil {
				store.dbs[bucket] = true
				glog.V(4).Infof("created table %s", tablePathPrefixWithBucket)
			} else {
				glog.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err)
			}
			glog.V(4).InfofCtx(ctx, "bucket %q exists, adding to cache", bucket)
			store.dbs[bucket] = true
		}
		bucketPrefix := path.Join(store.tablePathPrefix, bucket)
		tablePathPrefix = &bucketPrefix
		tablePathPrefix = &tablePathPrefixWithBucket
	}
	return
}

@@ -488,25 +441,25 @@ func (store *YdbStore) ensureTables(ctx context.Context) error {
func (store *YdbStore) ensureTables(ctx context.Context) error {
	prefixFull := store.tablePathPrefix

	glog.V(4).InfofCtx(ctx, "creating base table %s", prefixFull)
	glog.V(4).Infof("creating base table %s", prefixFull)
	baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE)
	if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
		return s.CreateTable(ctx, baseTable, store.createTableOptions()...)
		return s.CreateTable(ctx, baseTable, createTableOptions()...)
	}); err != nil {
		return fmt.Errorf("failed to create base table %s: %v", baseTable, err)
	}

	glog.V(4).InfofCtx(ctx, "creating bucket tables")
	glog.V(4).Infof("creating bucket tables")
	if store.SupportBucketTable {
		store.dbsLock.Lock()
		defer store.dbsLock.Unlock()
		for bucket := range store.dbs {
			glog.V(4).InfofCtx(ctx, "creating bucket table %s", bucket)
			glog.V(4).Infof("creating bucket table %s", bucket)
			bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE)
			if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
				return s.CreateTable(ctx, bucketTable, store.createTableOptions()...)
				return s.CreateTable(ctx, bucketTable, createTableOptions()...)
			}); err != nil {
				glog.ErrorfCtx(ctx, "failed to create bucket table %s: %v", bucketTable, err)
				glog.Errorf("failed to create bucket table %s: %v", bucketTable, err)
			}
		}
	}
@@ -9,54 +9,48 @@ import (
	"github.com/seaweedfs/seaweedfs/weed/filer"
	"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
	"github.com/seaweedfs/seaweedfs/weed/util"
	"github.com/ydb-platform/ydb-go-sdk/v3/query"
	"github.com/ydb-platform/ydb-go-sdk/v3/table"
	"github.com/ydb-platform/ydb-go-sdk/v3/table/result/named"
	"github.com/ydb-platform/ydb-go-sdk/v3/table/types"
)

func (store *YdbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
	dirStr, dirHash, name := abstract_sql.GenDirAndName(key)
	fileMeta := FileMeta{dirHash, name, dirStr, value}
	return store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
		_, err = s.Query(ctx, *withPragma(&store.tablePathPrefix, upsertQuery),
			query.WithParameters(fileMeta.queryParameters(0)), rwQC)
	return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) {
		_, _, err = s.Execute(ctx, rwTX, *withPragma(&store.tablePathPrefix, upsertQuery),
			fileMeta.queryParameters(0))
		if err != nil {
			return fmt.Errorf("kv put execute %s: %v", util.NewFullPath(dirStr, name).Name(), err)
		}
		return nil
	}, query.WithIdempotent())
	})
}

func (store *YdbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
	dirStr, dirHash, name := abstract_sql.GenDirAndName(key)
	valueFound := false
	err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) error {
		res, err := s.Query(ctx, *withPragma(&store.tablePathPrefix, findQuery),
			query.WithParameters(table.NewQueryParameters(
	err = store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
		_, res, err := s.Execute(ctx, roTX, *withPragma(&store.tablePathPrefix, findQuery),
			table.NewQueryParameters(
				table.ValueParam("$dir_hash", types.Int64Value(dirHash)),
				table.ValueParam("$directory", types.UTF8Value(dirStr)),
				table.ValueParam("$name", types.UTF8Value(name)))), roQC)
				table.ValueParam("$name", types.UTF8Value(name))))
		if err != nil {
			return fmt.Errorf("kv get execute %s: %v", util.NewFullPath(dirStr, name).Name(), err)
		}
		defer func() { _ = res.Close(ctx) }()
		for rs, err := range res.ResultSets(ctx) {
			if err != nil {
				return err
			}
			for row, err := range rs.Rows(ctx) {
				if err != nil {
					return err
				}
				if err := row.Scan(&value); err != nil {
					return fmt.Errorf("scan %s : %v", util.NewFullPath(dirStr, name).Name(), err)
				}
				valueFound = true
				return nil
			}
		defer func() { _ = res.Close() }()
		if !res.NextResultSet(ctx) || !res.HasNextRow() {
			return nil
		}
		return nil
	}, query.WithIdempotent())
		for res.NextRow() {
			if err := res.ScanNamed(named.OptionalWithDefault("meta", &value)); err != nil {
				return fmt.Errorf("scanNamed %s : %v", util.NewFullPath(dirStr, name).Name(), err)
			}
			valueFound = true
			return nil
		}
		return res.Err()
	})

	if !valueFound {
		return nil, filer.ErrKvNotFound

@@ -67,16 +61,15 @@ func (store *YdbStore) KvGet(ctx context.Context, key []byte) (value []byte, err

func (store *YdbStore) KvDelete(ctx context.Context, key []byte) (err error) {
	dirStr, dirHash, name := abstract_sql.GenDirAndName(key)
	return store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
		_, err = s.Query(ctx, *withPragma(&store.tablePathPrefix, deleteQuery),
			query.WithParameters(table.NewQueryParameters(
	return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) {
		_, _, err = s.Execute(ctx, rwTX, *withPragma(&store.tablePathPrefix, deleteQuery),
			table.NewQueryParameters(
				table.ValueParam("$dir_hash", types.Int64Value(dirHash)),
				table.ValueParam("$directory", types.UTF8Value(dirStr)),
				table.ValueParam("$name", types.UTF8Value(name)))), rwQC)
				table.ValueParam("$name", types.UTF8Value(name))))
		if err != nil {
			return fmt.Errorf("kv delete %s: %v", util.NewFullPath(dirStr, name).Name(), err)
		}
		return nil
	}, query.WithIdempotent())
	})

}
@ -13,8 +13,7 @@ func TestStore(t *testing.T) {
|
|||
// to set up local env
|
||||
if false {
|
||||
store := &YdbStore{}
|
||||
store.initialize("/buckets", "grpc://localhost:2136/?database=local", "seaweedfs", true, 10, 50,
|
||||
true, 200, true, 5, 1000, 2000)
|
||||
store.initialize("/buckets", "grpc://localhost:2136/?database=local", "seaweedfs", true, 10, 50)
|
||||
store_test.TestFilerStore(t, store)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -30,35 +30,26 @@ func (fm *FileMeta) queryParameters(ttlSec int32) *table.QueryParameters {
table.ValueParam("$dir_hash", types.Int64Value(fm.DirHash)),
table.ValueParam("$directory", types.UTF8Value(fm.Directory)),
table.ValueParam("$name", types.UTF8Value(fm.Name)),
table.ValueParam("$meta", types.BytesValue(fm.Meta)),
table.ValueParam("$meta", types.StringValue(fm.Meta)),
table.ValueParam("$expire_at", expireAtValue))
}

func (store *YdbStore) createTableOptions() []options.CreateTableOption {
func createTableOptions() []options.CreateTableOption {
columnUnit := options.TimeToLiveUnitSeconds
return []options.CreateTableOption{
options.WithColumn("dir_hash", types.TypeInt64),
options.WithColumn("directory", types.TypeUTF8),
options.WithColumn("name", types.TypeUTF8),
options.WithColumn("meta", types.TypeString),
options.WithColumn("dir_hash", types.Optional(types.TypeInt64)),
options.WithColumn("directory", types.Optional(types.TypeUTF8)),
options.WithColumn("name", types.Optional(types.TypeUTF8)),
options.WithColumn("meta", types.Optional(types.TypeString)),
options.WithColumn("expire_at", types.Optional(types.TypeUint32)),
options.WithPrimaryKeyColumn("dir_hash", "directory", "name"),
options.WithPrimaryKeyColumn("dir_hash", "name"),
options.WithTimeToLiveSettings(options.TimeToLiveSettings{
ColumnName: "expire_at",
ColumnUnit: &columnUnit,
Mode: options.TimeToLiveModeValueSinceUnixEpoch},
),
options.WithPartitioningSettings(
options.WithPartitioningBy([]string{"dir_hash", "name"}),
options.WithPartitioningBySize(store.partitionBySizeEnabled),
options.WithPartitionSizeMb(store.partitionSizeMb),
options.WithPartitioningByLoad(store.partitionByLoadEnabled),
options.WithMinPartitionsCount(store.minPartitionsCount),
options.WithMaxPartitionsCount(store.maxPartitionsCount),
),
}
}

func withPragma(prefix *string, query string) *string {
queryWithPragma := fmt.Sprintf(query, *prefix)
return &queryWithPragma
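Editor's note: `withPragma` is just `fmt.Sprintf` over a query template, so every query constant is expected to carry one formatting verb that receives the store's table path prefix. A self-contained illustration; the query text and prefix here are assumptions, not the store's actual constants:

    package main

    import "fmt"

    // withPragma mirrors the helper above: substitute the table path prefix
    // into a query template that carries a single %v verb.
    func withPragma(prefix *string, query string) *string {
        queryWithPragma := fmt.Sprintf(query, *prefix)
        return &queryWithPragma
    }

    func main() {
        prefix := "/local/seaweedfs" // hypothetical database path
        findQuery := `PRAGMA TablePathPrefix("%v"); SELECT meta FROM file_meta WHERE name = $name;`
        fmt.Println(*withPragma(&prefix, findQuery))
    }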
@ -1,246 +0,0 @@
package glog

import (
"context"
"fmt"
"sync/atomic"

reqid "github.com/seaweedfs/seaweedfs/weed/util/request_id"
)

const requestIDField = "request_id"

// formatMetaTag returns a formatted request ID tag from the context,
// like "request_id:abc123". Returns an empty string if no request ID is found.
func formatMetaTag(ctx context.Context) string {
if requestID := reqid.Get(ctx); requestID != "" {
return fmt.Sprintf("%s:%s", requestIDField, requestID)
}
return ""
}

// InfoCtx is a context-aware alternative to Verbose.Info.
// Logs to the INFO log, guarded by the value of v, and prepends a request ID from the context if present.
// Arguments are handled in the manner of fmt.Print.
func (v Verbose) InfoCtx(ctx context.Context, args ...interface{}) {
if !v {
return
}
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.print(infoLog, args...)
}

// InfolnCtx is a context-aware alternative to Verbose.Infoln.
// Logs to the INFO log, prepending a request ID from the context if it exists.
// Arguments are handled in the manner of fmt.Println.
func (v Verbose) InfolnCtx(ctx context.Context, args ...interface{}) {
if !v {
return
}
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.println(infoLog, args...)
}

// InfofCtx is a context-aware alternative to Verbose.Infof.
// Logs to the INFO log, guarded by the value of v, and prepends a request ID from the context if present.
// Arguments are handled in the manner of fmt.Printf.
func (v Verbose) InfofCtx(ctx context.Context, format string, args ...interface{}) {
if !v {
return
}
if metaTag := formatMetaTag(ctx); metaTag != "" {
format = metaTag + " " + format
}
logging.printf(infoLog, format, args...)
}

// InfofCtx logs a formatted message at info level, prepending a request ID from
// the context if it exists. This is a context-aware alternative to Infof.
func InfofCtx(ctx context.Context, format string, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
format = metaTag + " " + format
}
logging.printf(infoLog, format, args...)
}

// InfoCtx logs a message at info level, prepending a request ID from the context
// if it exists. This is a context-aware alternative to Info.
func InfoCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.print(infoLog, args...)
}

// WarningCtx logs to the WARNING and INFO logs.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to Warning.
func WarningCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.print(warningLog, args...)
}

// WarningDepthCtx logs to the WARNING and INFO logs with a custom call depth.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to WarningDepth.
func WarningDepthCtx(ctx context.Context, depth int, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.printDepth(warningLog, depth, args...)
}

// WarninglnCtx logs to the WARNING and INFO logs.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
// This is a context-aware alternative to Warningln.
func WarninglnCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.println(warningLog, args...)
}

// WarningfCtx logs to the WARNING and INFO logs.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
// This is a context-aware alternative to Warningf.
func WarningfCtx(ctx context.Context, format string, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
format = metaTag + " " + format
}
logging.printf(warningLog, format, args...)
}

// ErrorCtx logs to the ERROR, WARNING, and INFO logs.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to Error.
func ErrorCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.print(errorLog, args...)
}

// ErrorDepthCtx logs to the ERROR, WARNING, and INFO logs with a custom call depth.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to ErrorDepth.
func ErrorDepthCtx(ctx context.Context, depth int, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.printDepth(errorLog, depth, args...)
}

// ErrorlnCtx logs to the ERROR, WARNING, and INFO logs.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
// This is a context-aware alternative to Errorln.
func ErrorlnCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.println(errorLog, args...)
}

// ErrorfCtx logs to the ERROR, WARNING, and INFO logs.
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
// This is a context-aware alternative to Errorf.
func ErrorfCtx(ctx context.Context, format string, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
format = metaTag + " " + format
}
logging.printf(errorLog, format, args...)
}

// FatalCtx logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to Fatal.
func FatalCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.print(fatalLog, args...)
}

// FatalDepthCtx logs to the FATAL, ERROR, WARNING, and INFO logs with a custom call depth,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to FatalDepth.
func FatalDepthCtx(ctx context.Context, depth int, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.printDepth(fatalLog, depth, args...)
}

// FatallnCtx logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
// This is a context-aware alternative to Fatalln.
func FatallnCtx(ctx context.Context, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.println(fatalLog, args...)
}

// FatalfCtx logs to the FATAL, ERROR, WARNING, and INFO logs,
// including a stack trace of all running goroutines, then calls os.Exit(255).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
// This is a context-aware alternative to Fatalf.
func FatalfCtx(ctx context.Context, format string, args ...interface{}) {
if metaTag := formatMetaTag(ctx); metaTag != "" {
format = metaTag + " " + format
}
logging.printf(fatalLog, format, args...)
}

// ExitCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to Exit.
func ExitCtx(ctx context.Context, args ...interface{}) {
atomic.StoreUint32(&fatalNoStacks, 1)
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.print(fatalLog, args...)
}

// ExitDepthCtx logs to the FATAL, ERROR, WARNING, and INFO logs with a custom call depth,
// then calls os.Exit(1). Prepends a request ID from the context if it exists.
// Arguments are handled in the manner of fmt.Print.
// This is a context-aware alternative to ExitDepth.
func ExitDepthCtx(ctx context.Context, depth int, args ...interface{}) {
atomic.StoreUint32(&fatalNoStacks, 1)
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.printDepth(fatalLog, depth, args...)
}

// ExitlnCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
// This is a context-aware alternative to Exitln.
func ExitlnCtx(ctx context.Context, args ...interface{}) {
atomic.StoreUint32(&fatalNoStacks, 1)
if metaTag := formatMetaTag(ctx); metaTag != "" {
args = append([]interface{}{metaTag}, args...)
}
logging.println(fatalLog, args...)
}

// ExitfCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
// This is a context-aware alternative to Exitf.
func ExitfCtx(ctx context.Context, format string, args ...interface{}) {
atomic.StoreUint32(&fatalNoStacks, 1)
if metaTag := formatMetaTag(ctx); metaTag != "" {
format = metaTag + " " + format
}
logging.printf(fatalLog, format, args...)
}
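Editor's note: this deleted file (present on master, absent in 3.90) is what gives every glog level a `*Ctx` variant: each one pulls a request id out of the context via `reqid.Get` and prefixes the log line with a `request_id:<id>` tag. A short usage sketch, assuming the id was stored with `request_id.Set` the way the gRPC interceptor further down does; the rendered prefix shown in the comment is approximate:

    package main

    import (
        "context"

        "github.com/seaweedfs/seaweedfs/weed/glog"
        "github.com/seaweedfs/seaweedfs/weed/util/request_id"
    )

    func main() {
        ctx := request_id.Set(context.Background(), "abc123")
        // Logs roughly as: I... request_id:abc123 uploading chunk 3
        glog.InfofCtx(ctx, "uploading chunk %d", 3)
        glog.V(1).InfofCtx(ctx, "verbose detail for %s", "a.txt") // emitted only at -v=1
    }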
@ -3,11 +3,11 @@ package mount
import (
"context"
"errors"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"math/rand"
"os"
"path"
"path/filepath"
"sync"
"sync/atomic"
"time"

@ -23,7 +23,6 @@ import (
"github.com/seaweedfs/seaweedfs/weed/util"
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/util/grace"
"github.com/seaweedfs/seaweedfs/weed/util/version"
"github.com/seaweedfs/seaweedfs/weed/wdclient"

"github.com/hanwen/go-fuse/v2/fs"
@ -72,21 +71,19 @@ type WFS struct {
fuse.RawFileSystem
mount_pb.UnimplementedSeaweedMountServer
fs.Inode
option *Option
metaCache *meta_cache.MetaCache
stats statsCache
chunkCache *chunk_cache.TieredChunkCache
signature int32
concurrentWriters *util.LimitedConcurrentExecutor
copyBufferPool sync.Pool
concurrentCopiersSem chan struct{}
inodeToPath *InodeToPath
fhMap *FileHandleToInode
dhMap *DirectoryHandleToInode
fuseServer *fuse.Server
IsOverQuota bool
fhLockTable *util.LockTable[FileHandleId]
FilerConf *filer.FilerConf
option *Option
metaCache *meta_cache.MetaCache
stats statsCache
chunkCache *chunk_cache.TieredChunkCache
signature int32
concurrentWriters *util.LimitedConcurrentExecutor
inodeToPath *InodeToPath
fhMap *FileHandleToInode
dhMap *DirectoryHandleToInode
fuseServer *fuse.Server
IsOverQuota bool
fhLockTable *util.LockTable[FileHandleId]
FilerConf *filer.FilerConf
}

func NewSeaweedFileSystem(option *Option) *WFS {
@ -142,10 +139,6 @@ func NewSeaweedFileSystem(option *Option) *WFS {

if wfs.option.ConcurrentWriters > 0 {
wfs.concurrentWriters = util.NewLimitedConcurrentExecutor(wfs.option.ConcurrentWriters)
wfs.concurrentCopiersSem = make(chan struct{}, wfs.option.ConcurrentWriters)
}
wfs.copyBufferPool.New = func() any {
return make([]byte, option.ChunkSizeLimit)
}
return wfs
}

@ -190,6 +183,7 @@ func (wfs *WFS) maybeReadEntry(inode uint64) (path util.FullPath, fh *FileHandle
}

func (wfs *WFS) maybeLoadEntry(fullpath util.FullPath) (*filer_pb.Entry, fuse.Status) {

// glog.V(3).Infof("read entry cache miss %s", fullpath)
dir, name := fullpath.DirAndName()
@ -1,8 +0,0 @@
package mount

import (
"github.com/hanwen/go-fuse/v2/fuse"
)

func setBlksize(out *fuse.Attr, size uint32) {
}
@ -1,13 +1,13 @@
package mount

import (
"github.com/seaweedfs/seaweedfs/weed/util"
"net/http"
"time"

"github.com/hanwen/go-fuse/v2/fuse"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/util"
)

// CopyFileRange copies data from one file to another from and to specified offsets.
@ -70,85 +70,30 @@ func (wfs *WFS) CopyFileRange(cancel <-chan struct{}, in *fuse.CopyFileRangeIn)
in.OffOut, in.OffOut+in.Len,
)

// Concurrent copy operations could allocate too much memory, so we want to
// throttle our concurrency, scaling with the number of writers the mount
// was configured with.
if wfs.concurrentCopiersSem != nil {
wfs.concurrentCopiersSem <- struct{}{}
defer func() { <-wfs.concurrentCopiersSem }()
data := make([]byte, in.Len)
totalRead, err := readDataByFileHandle(data, fhIn, int64(in.OffIn))
if err != nil {
glog.Warningf("file handle read %s %d: %v", fhIn.FullPath(), totalRead, err)
return 0, fuse.EIO
}
data = data[:totalRead]

// We want to stream the copy operation to avoid allocating massive buffers.
nowUnixNano := time.Now().UnixNano()
totalCopied := int64(0)
buff := wfs.copyBufferPool.Get().([]byte)
defer wfs.copyBufferPool.Put(buff)
for {
// Comply with cancellation as best as we can, given that the underlying
// IO functions aren't cancellation-aware.
select {
case <-cancel:
glog.Warningf("canceled CopyFileRange for %s (copied %d)",
fhIn.FullPath(), totalCopied)
return uint32(totalCopied), fuse.EINTR
default: // keep going
}

// We can save one IO by breaking early if we already know the next read
// will result in zero bytes.
remaining := int64(in.Len) - totalCopied
readLen := min(remaining, int64(len(buff)))
if readLen == 0 {
break
}

// Perform the read
offsetIn := totalCopied + int64(in.OffIn)
numBytesRead, err := readDataByFileHandle(
buff[:readLen], fhIn, offsetIn)
if err != nil {
glog.Warningf("file handle read %s %d (total %d): %v",
fhIn.FullPath(), numBytesRead, totalCopied, err)
return 0, fuse.EIO
}

// Break if we're done copying (no more bytes to read)
if numBytesRead == 0 {
break
}

offsetOut := int64(in.OffOut) + totalCopied

// Detect mime type only during the beginning of our stream, since
// DetectContentType is expecting some of the first 512 bytes of the
// file. See [http.DetectContentType] for details.
if offsetOut <= 512 {
fhOut.contentType = http.DetectContentType(buff[:numBytesRead])
}

// Perform the write
fhOut.dirtyPages.writerPattern.MonitorWriteAt(offsetOut, int(numBytesRead))
fhOut.dirtyPages.AddPage(
offsetOut,
buff[:numBytesRead],
fhOut.dirtyPages.writerPattern.IsSequentialMode(),
nowUnixNano)

// Accumulate for the next loop iteration
totalCopied += numBytesRead
}

if totalCopied == 0 {
if totalRead == 0 {
return 0, fuse.OK
}

fhOut.entry.Attributes.FileSize = uint64(max(
totalCopied+int64(in.OffOut),
int64(fhOut.entry.Attributes.FileSize),
))
// put data at the specified offset in target file
fhOut.dirtyPages.writerPattern.MonitorWriteAt(int64(in.OffOut), int(in.Len))
fhOut.entry.Content = nil
fhOut.dirtyPages.AddPage(int64(in.OffOut), data, fhOut.dirtyPages.writerPattern.IsSequentialMode(), time.Now().UnixNano())
fhOut.entry.Attributes.FileSize = uint64(max(int64(in.OffOut)+totalRead, int64(fhOut.entry.Attributes.FileSize)))
fhOut.dirtyMetadata = true
written = uint32(totalRead)

// detect mime type
if written > 0 && in.OffOut <= 512 {
fhOut.contentType = http.DetectContentType(data)
}

written = uint32(totalCopied)
return written, fuse.OK
}
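Editor's note: the master side of this hunk replaces the single `make([]byte, in.Len)` allocation with a streaming loop: a pooled chunk-sized buffer, a cancellation check per iteration, and content-type sniffing only while the output offset is still within the first 512 bytes that `http.DetectContentType` inspects. A stripped-down sketch of the same loop over plain `io` interfaces; the real code reads through FUSE file handles and writes dirty pages, so treat this as illustrative only:

    package sketch

    import (
        "context"
        "io"
        "net/http"
        "sync"
    )

    var bufPool = sync.Pool{New: func() any { return make([]byte, 64*1024) }} // chunk-sized buffers

    // copyRange copies length bytes from src@offIn to dst@offOut in bounded
    // chunks, sniffing the content type only near the start of the output.
    func copyRange(src io.ReaderAt, dst io.WriterAt, offIn, offOut, length int64,
        cancel <-chan struct{}) (copied int64, contentType string, err error) {
        buff := bufPool.Get().([]byte)
        defer bufPool.Put(buff)
        for copied < length {
            select {
            case <-cancel: // best-effort cancellation between chunks
                return copied, contentType, context.Canceled
            default:
            }
            readLen := min(length-copied, int64(len(buff)))
            n, rerr := src.ReadAt(buff[:readLen], offIn+copied)
            if n == 0 {
                break // nothing left to copy
            }
            if offOut+copied <= 512 { // DetectContentType inspects the first 512 bytes
                contentType = http.DetectContentType(buff[:n])
            }
            if _, werr := dst.WriteAt(buff[:n], offOut+copied); werr != nil {
                return copied, contentType, werr
            }
            copied += int64(n)
            if rerr == io.EOF {
                break
            } else if rerr != nil {
                return copied, contentType, rerr
            }
        }
        return copied, contentType, nil
    }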
@ -1,6 +1,3 @@
//go:build !freebsd
// +build !freebsd

package mount

import (
@ -1,27 +0,0 @@
package mount

import (
"syscall"

"github.com/hanwen/go-fuse/v2/fuse"
)

func (wfs *WFS) GetXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string, dest []byte) (size uint32, code fuse.Status) {

return 0, fuse.Status(syscall.ENOTSUP)
}

func (wfs *WFS) SetXAttr(cancel <-chan struct{}, input *fuse.SetXAttrIn, attr string, data []byte) fuse.Status {

return fuse.Status(syscall.ENOTSUP)
}

func (wfs *WFS) ListXAttr(cancel <-chan struct{}, header *fuse.InHeader, dest []byte) (n uint32, code fuse.Status) {

return 0, fuse.Status(syscall.ENOTSUP)
}

func (wfs *WFS) RemoveXAttr(cancel <-chan struct{}, header *fuse.InHeader, attr string) fuse.Status {

return fuse.Status(syscall.ENOTSUP)
}
@ -4,10 +4,6 @@ import (
"context"
"encoding/binary"
"fmt"
"io"
"math"
"strings"

"github.com/parquet-go/parquet-go"
"github.com/seaweedfs/seaweedfs/weed/filer"
"github.com/seaweedfs/seaweedfs/weed/mq/schema"

@ -17,6 +13,9 @@
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
"google.golang.org/protobuf/proto"
"io"
"math"
"strings"
)

var (

@ -43,6 +42,10 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes).
RecordTypeEnd()

parquetSchema, err := schema.ToParquetSchema(t.Name, recordType)
if err != nil {
return nil
}
parquetLevels, err := schema.ToParquetLevels(recordType)
if err != nil {
return nil

@ -58,12 +61,11 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
readerAt := filer.NewChunkReaderAtFromClient(readerCache, chunkViews, int64(fileSize))

// create parquet reader
parquetReader := parquet.NewReader(readerAt)
parquetReader := parquet.NewReader(readerAt, parquetSchema)
rows := make([]parquet.Row, 128)
for {
rowCount, readErr := parquetReader.ReadRows(rows)

// Process the rows first, even if EOF is returned
for i := 0; i < rowCount; i++ {
row := rows[i]
// convert parquet row to schema_pb.RecordValue

@ -97,16 +99,12 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
}
}

// Check for end conditions after processing rows
if readErr != nil {
if readErr == io.EOF {
return processedTsNs, nil
}
return processedTsNs, readErr
}
if rowCount == 0 {
return processedTsNs, nil
}
}
return
}
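Editor's note: both sides batch 128 rows per call, but the master side follows the parquet-go contract that `ReadRows` may hand back rows together with `io.EOF`: process the returned rows first, then inspect the error. A condensed sketch of that loop with a placeholder row handler:

    package sketch

    import (
        "io"

        "github.com/parquet-go/parquet-go"
    )

    // drainRows mirrors the read loop above: rows returned alongside io.EOF
    // must be processed before the loop terminates.
    func drainRows(reader *parquet.Reader, handle func(parquet.Row) error) error {
        rows := make([]parquet.Row, 128)
        for {
            rowCount, readErr := reader.ReadRows(rows)
            for i := 0; i < rowCount; i++ { // process first, even on EOF
                if err := handle(rows[i]); err != nil {
                    return err
                }
            }
            if readErr == io.EOF {
                return nil
            }
            if readErr != nil {
                return readErr
            }
            if rowCount == 0 {
                return nil
            }
        }
    }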
@ -2,13 +2,12 @@ package schema

import (
"fmt"
"io"
"os"
"testing"

"github.com/parquet-go/parquet-go"
"github.com/parquet-go/parquet-go/compress/zstd"
"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
"io"
"os"
"testing"
)

func TestWriteReadParquet(t *testing.T) {

@ -126,25 +125,16 @@ func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parque
t.Fatalf("os.Open failed: %v", err)
}
defer file.Close()

// Get file info to determine size
fileInfo, err := file.Stat()
if err != nil {
t.Fatalf("file.Stat failed: %v", err)
}

// Create a parquet file from the opened file
parquetFile, err := parquet.OpenFile(file, fileInfo.Size())
if err != nil {
t.Fatalf("parquet.OpenFile failed: %v", err)
}

reader := parquet.NewReader(parquetFile)
reader := parquet.NewReader(file, parquetSchema)
rows := make([]parquet.Row, 128)
for {
rowCount, err := reader.ReadRows(rows)

// Process the rows first, even if EOF is returned
if err != nil {
if err == io.EOF {
break
}
t.Fatalf("reader.Read failed: %v", err)
}
for i := 0; i < rowCount; i++ {
row := rows[i]
// convert parquet row to schema_pb.RecordValue

@ -157,17 +147,6 @@ func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parque
}
}
total += rowCount

// Check for end conditions after processing rows
if err != nil {
if err == io.EOF {
break
}
t.Fatalf("reader.Read failed: %v", err)
}
if rowCount == 0 {
break
}
}
fmt.Printf("total: %v\n", total)
return
@ -3,9 +3,8 @@ package operation

import (
"context"
"fmt"
"io"

"github.com/seaweedfs/seaweedfs/weed/pb"
"io"

"google.golang.org/grpc"

@ -54,10 +53,6 @@ func TailVolumeFromSource(volumeServer pb.ServerAddress, grpcDialOption grpc.Dia

needleHeader := resp.NeedleHeader
needleBody := resp.NeedleBody
version := needle.Version(resp.Version)
if version == 0 {
version = needle.GetCurrentVersion()
}

if len(needleHeader) == 0 {
continue

@ -77,7 +72,7 @@ func TailVolumeFromSource(volumeServer pb.ServerAddress, grpcDialOption grpc.Dia

n := new(needle.Needle)
n.ParseNeedleHeader(needleHeader)
err = n.ReadNeedleBodyBytes(needleBody, version)
err = n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion)
if err != nil {
return err
}
@ -5,6 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/valyala/bytebufferpool"
"io"
"mime"
"mime/multipart"

@ -15,9 +16,6 @@ import (
"sync"
"time"

"github.com/seaweedfs/seaweedfs/weed/util/request_id"
"github.com/valyala/bytebufferpool"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
"github.com/seaweedfs/seaweedfs/weed/security"

@ -189,7 +187,7 @@ func (uploader *Uploader) retriedUploadData(ctx context.Context, data []byte, op
uploadResult.RetryCount = i
return
}
glog.WarningfCtx(ctx, "uploading %d to %s: %v", i, option.UploadUrl, err)
glog.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err)
}
return
}

@ -331,16 +329,16 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction

file_writer, cp_err := body_writer.CreatePart(h)
if cp_err != nil {
glog.V(0).InfolnCtx(ctx, "error creating form file", cp_err.Error())
glog.V(0).Infoln("error creating form file", cp_err.Error())
return nil, cp_err
}
if err := fillBufferFunction(file_writer); err != nil {
glog.V(0).InfolnCtx(ctx, "error copying data", err)
glog.V(0).Infoln("error copying data", err)
return nil, err
}
content_type := body_writer.FormDataContentType()
if err := body_writer.Close(); err != nil {
glog.V(0).InfolnCtx(ctx, "error closing body", err)
glog.V(0).Infoln("error closing body", err)
return nil, err
}
if option.BytesBuffer == nil {

@ -350,7 +348,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
}
req, postErr := http.NewRequest(http.MethodPost, option.UploadUrl, reqReader)
if postErr != nil {
glog.V(1).InfofCtx(ctx, "create upload request %s: %v", option.UploadUrl, postErr)
glog.V(1).Infof("create upload request %s: %v", option.UploadUrl, postErr)
return nil, fmt.Errorf("create upload request %s: %v", option.UploadUrl, postErr)
}
req.Header.Set("Content-Type", content_type)

@ -361,7 +359,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
req.Header.Set("Authorization", "BEARER "+string(option.Jwt))
}

request_id.InjectToRequest(ctx, req)
util.ReqWithRequestId(req, ctx)

// print("+")
resp, post_err := uploader.httpClient.Do(req)

@ -369,7 +367,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
if post_err != nil {
if strings.Contains(post_err.Error(), "connection reset by peer") ||
strings.Contains(post_err.Error(), "use of closed network connection") {
glog.V(1).InfofCtx(ctx, "repeat error upload request %s: %v", option.UploadUrl, postErr)
glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr)
stats.FilerHandlerCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc()
resp, post_err = uploader.httpClient.Do(req)
defer util_http.CloseResponse(resp)

@ -394,7 +392,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction

unmarshal_err := json.Unmarshal(resp_body, &ret)
if unmarshal_err != nil {
glog.ErrorfCtx(ctx, "unmarshal %s: %v", option.UploadUrl, string(resp_body))
glog.Errorf("unmarshal %s: %v", option.UploadUrl, string(resp_body))
return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err)
}
if ret.Error != "" {
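Editor's note: these hunks swap 3.90's `util.ReqWithRequestId` helper for `request_id.InjectToRequest`, which stamps the context's request id onto the outgoing upload request. A minimal sketch of that step; the URL and nil body are placeholders:

    package sketch

    import (
        "context"
        "net/http"

        "github.com/seaweedfs/seaweedfs/weed/util/request_id"
    )

    // newUploadRequest builds the POST and stamps the context's request id
    // onto it, mirroring the upload path above.
    func newUploadRequest(ctx context.Context, uploadUrl string) (*http.Request, error) {
        req, err := http.NewRequest(http.MethodPost, uploadUrl, nil)
        if err != nil {
            return nil, err
        }
        request_id.InjectToRequest(ctx, req) // copies the id into the outgoing headers
        return req, nil
    }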
File diff suppressed because it is too large
@ -39,7 +39,7 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath util.Fu
// glog.V(3).Infof("read %s request: %v", fullFilePath, request)
resp, err := LookupEntry(ctx, client, request)
if err != nil {
glog.V(3).InfofCtx(ctx, "read %s %v: %v", fullFilePath, resp, err)
glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
return err
}

@ -117,7 +117,7 @@ func doSeaweedList(ctx context.Context, client SeaweedFilerClient, fullDirPath u
InclusiveStartFrom: inclusive,
}

glog.V(4).InfofCtx(ctx, "read directory: %v", request)
glog.V(4).Infof("read directory: %v", request)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
stream, err := client.ListEntries(ctx, request)

@ -165,14 +165,14 @@ func Exists(ctx context.Context, filerClient FilerClient, parentDirectoryPath st
Name: entryName,
}

glog.V(4).InfofCtx(ctx, "exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
resp, err := LookupEntry(ctx, client, request)
if err != nil {
if err == ErrNotFound {
exists = false
return nil
}
glog.V(0).InfofCtx(ctx, "exists entry %v: %v", request, err)
glog.V(0).Infof("exists entry %v: %v", request, err)
return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
}

@ -193,9 +193,9 @@ func Touch(ctx context.Context, filerClient FilerClient, parentDirectoryPath str
Entry: entry,
}

glog.V(4).InfofCtx(ctx, "touch entry %v/%v: %v", parentDirectoryPath, entryName, request)
glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request)
if err := UpdateEntry(ctx, client, request); err != nil {
glog.V(0).InfofCtx(ctx, "touch exists entry %v: %v", request, err)
glog.V(0).Infof("touch exists entry %v: %v", request, err)
return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
}

@ -232,9 +232,9 @@ func DoMkdir(ctx context.Context, client SeaweedFilerClient, parentDirectoryPath
Entry: entry,
}

glog.V(1).InfofCtx(ctx, "mkdir: %v", request)
glog.V(1).Infof("mkdir: %v", request)
if err := CreateEntry(ctx, client, request); err != nil {
glog.V(0).InfofCtx(ctx, "mkdir %v: %v", request, err)
glog.V(0).Infof("mkdir %v: %v", request, err)
return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
}

@ -266,9 +266,9 @@ func MkFile(ctx context.Context, filerClient FilerClient, parentDirectoryPath st
Entry: entry,
}

glog.V(1).InfofCtx(ctx, "create file: %s/%s", parentDirectoryPath, fileName)
glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
if err := CreateEntry(ctx, client, request); err != nil {
glog.V(0).InfofCtx(ctx, "create file %v:%v", request, err)
glog.V(0).Infof("create file %v:%v", request, err)
return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
}
@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.29.3
// - protoc v5.28.3
// source: filer.proto

package filer_pb

@ -111,11 +111,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error {
resp, err := client.CreateEntry(ctx, request)
if err != nil {
glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
return fmt.Errorf("CreateEntry: %v", err)
}
if resp.Error != "" {
glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
return fmt.Errorf("CreateEntry : %v", resp.Error)
}
return nil

@ -124,7 +124,7 @@ func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *Create
func UpdateEntry(ctx context.Context, client SeaweedFilerClient, request *UpdateEntryRequest) error {
_, err := client.UpdateEntry(ctx, request)
if err != nil {
glog.V(1).InfofCtx(ctx, "update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
return fmt.Errorf("UpdateEntry: %v", err)
}
return nil

@ -136,7 +136,7 @@ func LookupEntry(ctx context.Context, client SeaweedFilerClient, request *Lookup
if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
return nil, ErrNotFound
}
glog.V(3).InfofCtx(ctx, "read %s/%v: %v", request.Directory, request.Name, err)
glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
return nil, fmt.Errorf("LookupEntry1: %v", err)
}
if resp.Entry == nil {
@ -3,6 +3,8 @@ package pb

import (
"context"
"fmt"
"github.com/google/uuid"
"google.golang.org/grpc/metadata"
"math/rand/v2"
"net/http"
"strconv"

@ -10,10 +12,6 @@ import (
"sync"
"time"

"github.com/google/uuid"
"github.com/seaweedfs/seaweedfs/weed/util/request_id"
"google.golang.org/grpc/metadata"

"github.com/seaweedfs/seaweedfs/weed/glog"
"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
"github.com/seaweedfs/seaweedfs/weed/util"

@ -130,7 +128,7 @@ func requestIDUnaryInterceptor() grpc.UnaryServerInterceptor {
handler grpc.UnaryHandler,
) (interface{}, error) {
incomingMd, _ := metadata.FromIncomingContext(ctx)
idList := incomingMd.Get(request_id.AmzRequestIDHeader)
idList := incomingMd.Get(util.RequestIDKey)
var reqID string
if len(idList) > 0 {
reqID = idList[0]

@ -141,12 +139,11 @@ func requestIDUnaryInterceptor() grpc.UnaryServerInterceptor {

ctx = metadata.NewOutgoingContext(ctx,
metadata.New(map[string]string{
request_id.AmzRequestIDHeader: reqID,
util.RequestIDKey: reqID,
}))

ctx = request_id.Set(ctx, reqID)

grpc.SetTrailer(ctx, metadata.Pairs(request_id.AmzRequestIDHeader, reqID))
ctx = util.WithRequestID(ctx, reqID)
grpc.SetTrailer(ctx, metadata.Pairs(util.RequestIDKey, reqID))

return handler(ctx, req)
}
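Editor's note: the interceptor above reads an incoming request id (minting one when absent), republishes it on the outgoing metadata, stores it in the context for the `*Ctx` loggers, and echoes it back as a trailer. A compressed sketch of the master-side shape; the `uuid.New()` fallback is an assumption based on the `github.com/google/uuid` import:

    package sketch

    import (
        "context"

        "github.com/google/uuid"
        "github.com/seaweedfs/seaweedfs/weed/util/request_id"
        "google.golang.org/grpc"
        "google.golang.org/grpc/metadata"
    )

    func requestIDUnaryInterceptor() grpc.UnaryServerInterceptor {
        return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo,
            handler grpc.UnaryHandler) (interface{}, error) {
            incomingMd, _ := metadata.FromIncomingContext(ctx)
            var reqID string
            if idList := incomingMd.Get(request_id.AmzRequestIDHeader); len(idList) > 0 {
                reqID = idList[0]
            } else {
                reqID = uuid.New().String() // assumed fallback when no id arrives
            }
            ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{
                request_id.AmzRequestIDHeader: reqID, // propagate downstream
            }))
            ctx = request_id.Set(ctx, reqID) // make it visible to the *Ctx loggers
            grpc.SetTrailer(ctx, metadata.Pairs(request_id.AmzRequestIDHeader, reqID))
            return handler(ctx, req)
        }
    }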
@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.6
// protoc v5.29.3
// protoc-gen-go v1.34.2
// protoc v5.28.3
// source: iam.proto

package iam_pb

@ -11,7 +11,6 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)

const (

@ -22,18 +21,21 @@ const (
)

type S3ApiConfiguration struct {
state protoimpl.MessageState `protogen:"open.v1"`
Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"`
Accounts []*Account `protobuf:"bytes,2,rep,name=accounts,proto3" json:"accounts,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"`
Accounts []*Account `protobuf:"bytes,2,rep,name=accounts,proto3" json:"accounts,omitempty"`
}

func (x *S3ApiConfiguration) Reset() {
*x = S3ApiConfiguration{}
mi := &file_iam_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_iam_proto_msgTypes[0]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *S3ApiConfiguration) String() string {

@ -44,7 +46,7 @@ func (*S3ApiConfiguration) ProtoMessage() {}

func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message {
mi := &file_iam_proto_msgTypes[0]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)

@ -74,20 +76,23 @@ func (x *S3ApiConfiguration) GetAccounts() []*Account {
}

type Identity struct {
state protoimpl.MessageState `protogen:"open.v1"`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"`
Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
Account *Account `protobuf:"bytes,4,opt,name=account,proto3" json:"account,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"`
Actions []string `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
Account *Account `protobuf:"bytes,4,opt,name=account,proto3" json:"account,omitempty"`
}

func (x *Identity) Reset() {
*x = Identity{}
mi := &file_iam_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_iam_proto_msgTypes[1]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *Identity) String() string {

@ -98,7 +103,7 @@ func (*Identity) ProtoMessage() {}

func (x *Identity) ProtoReflect() protoreflect.Message {
mi := &file_iam_proto_msgTypes[1]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)

@ -142,18 +147,21 @@ func (x *Identity) GetAccount() *Account {
}

type Credential struct {
state protoimpl.MessageState `protogen:"open.v1"`
AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
}

func (x *Credential) Reset() {
*x = Credential{}
mi := &file_iam_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_iam_proto_msgTypes[2]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *Credential) String() string {

@ -164,7 +172,7 @@ func (*Credential) ProtoMessage() {}

func (x *Credential) ProtoReflect() protoreflect.Message {
mi := &file_iam_proto_msgTypes[2]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)

@ -194,19 +202,22 @@ func (x *Credential) GetSecretKey() string {
}

type Account struct {
state protoimpl.MessageState `protogen:"open.v1"`
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
EmailAddress string `protobuf:"bytes,3,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
unknownFields protoimpl.UnknownFields
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields

Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
EmailAddress string `protobuf:"bytes,3,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
}

func (x *Account) Reset() {
*x = Account{}
mi := &file_iam_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
if protoimpl.UnsafeEnabled {
mi := &file_iam_proto_msgTypes[3]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
}

func (x *Account) String() string {

@ -217,7 +228,7 @@ func (*Account) ProtoMessage() {}

func (x *Account) ProtoReflect() protoreflect.Message {
mi := &file_iam_proto_msgTypes[3]
if x != nil {
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)

@ -255,40 +266,54 @@ func (x *Account) GetEmailAddress() string {

var File_iam_proto protoreflect.FileDescriptor

const file_iam_proto_rawDesc = "" +
"\n" +
"\tiam.proto\x12\x06iam_pb\"s\n" +
"\x12S3ApiConfiguration\x120\n" +
"\n" +
"identities\x18\x01 \x03(\v2\x10.iam_pb.IdentityR\n" +
"identities\x12+\n" +
"\baccounts\x18\x02 \x03(\v2\x0f.iam_pb.AccountR\baccounts\"\x99\x01\n" +
"\bIdentity\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x124\n" +
"\vcredentials\x18\x02 \x03(\v2\x12.iam_pb.CredentialR\vcredentials\x12\x18\n" +
"\aactions\x18\x03 \x03(\tR\aactions\x12)\n" +
"\aaccount\x18\x04 \x01(\v2\x0f.iam_pb.AccountR\aaccount\"J\n" +
"\n" +
"Credential\x12\x1d\n" +
"\n" +
"access_key\x18\x01 \x01(\tR\taccessKey\x12\x1d\n" +
"\n" +
"secret_key\x18\x02 \x01(\tR\tsecretKey\"a\n" +
"\aAccount\x12\x0e\n" +
"\x02id\x18\x01 \x01(\tR\x02id\x12!\n" +
"\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12#\n" +
"\remail_address\x18\x03 \x01(\tR\femailAddress2!\n" +
"\x1fSeaweedIdentityAccessManagementBK\n" +
"\x10seaweedfs.clientB\bIamProtoZ-github.com/seaweedfs/seaweedfs/weed/pb/iam_pbb\x06proto3"
var file_iam_proto_rawDesc = []byte{
0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d,
0x5f, 0x70, 0x62, 0x22, 0x73, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66,
0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x08, 0x61,
0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x08,
0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65,
0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63, 0x72, 0x65,
0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12,
0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12,
0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x61, 0x6d,
0x5f, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x07, 0x61, 0x63, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x22, 0x4a, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79,
0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65,
0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18,
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79,
0x22, 0x61, 0x0a, 0x07, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64,
0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23,
0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72,
0x65, 0x73, 0x73, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x49, 0x64,
0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x6e, 0x61,
0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08, 0x49, 0x61, 0x6d, 0x50,
0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65,
0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x69, 0x61, 0x6d,
0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
file_iam_proto_rawDescOnce sync.Once
file_iam_proto_rawDescData []byte
file_iam_proto_rawDescData = file_iam_proto_rawDesc
)

func file_iam_proto_rawDescGZIP() []byte {
file_iam_proto_rawDescOnce.Do(func() {
file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_iam_proto_rawDesc), len(file_iam_proto_rawDesc)))
file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData)
})
return file_iam_proto_rawDescData
}

@ -317,11 +342,61 @@ func file_iam_proto_init() {
if File_iam_proto != nil {
return
}
if !protoimpl.UnsafeEnabled {
file_iam_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*S3ApiConfiguration); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_iam_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Identity); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_iam_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*Credential); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_iam_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Account); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: unsafe.Slice(unsafe.StringData(file_iam_proto_rawDesc), len(file_iam_proto_rawDesc)),
RawDescriptor: file_iam_proto_rawDesc,
NumEnums: 0,
NumMessages: 4,
NumExtensions: 0,

@ -332,6 +407,7 @@ func file_iam_proto_init() {
MessageInfos: file_iam_proto_msgTypes,
}.Build()
File_iam_proto = out.File
file_iam_proto_rawDesc = nil
file_iam_proto_goTypes = nil
file_iam_proto_depIdxs = nil
}

@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.29.3
// - protoc v5.28.3
// source: iam.proto

package iam_pb
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff