Mirror of https://github.com/chrislusf/seaweedfs
Synced 2025-06-29 16:22:46 +02:00

Compare commits (58 commits)
Commits in this comparison (SHA1, newest first):

3023a6f3a4, a788d9ab53, 3d519fa2a6, 1733d0ce68, 166e36bcd3, adc7807451,
52097a1d9b, 4cd6c3ec36, a1aab8a083, 29892c43ff, 5e79436498, 877b9b788a,
ab49540d2b, 95261a712e, 4511c2cc1f, 2cdd8092cc, e222883dd0, 3b6155f4ee,
29fa698414, f68f55c7e9, a5bb5e04da, 7f1f826468, 47b9db0215, ba8d261591,
5f1d2a9745, b27ba8e984, 7324cb7171, 5a7d226d93, 2b3385e201, 828228dbb0,
90c128e7a6, a72c442945, f52134f9a1, abd5102819, da728750be, 2f1b3d68d7,
87927d068b, 9a115068af, 748bf5e4d3, c602f53a6e, d2be5822a1, 96632a34b1,
11f37cd9f2, 34c6249886, 5d8a391b95, 06a3140142, 29d1701c34, 78069605a6,
549fb110d7, f0e987dc9d, f598d8e84c, d6de561650, 0cd3483158, db36e89e7b,
d8e8e11519, 13103c32d8, 77397be070, 958d88cb85
162 changed files with 10034 additions and 11971 deletions
All seven container workflows bump the pinned docker/setup-buildx-action commit from b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 to e468171a9de216ec08956ac3ada2f0791b6bd435; docker/setup-qemu-action stays pinned at 29109295f81e9208d7d86ff1c6c12d2833863392.

.github/workflows/container_dev.yml (vendored, 2 changed lines)

@@ -36,7 +36,7 @@ jobs:
           uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       -
           name: Set up Docker Buildx
-          uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+          uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
           with:
             buildkitd-flags: "--debug"
       -

.github/workflows/container_latest.yml (vendored, 2 changed lines) carries the identical hunk at @@ -37,7 +37,7 @@ jobs:.

.github/workflows/container_release1.yml (vendored, 2 changed lines)

@@ -37,7 +37,7 @@ jobs:
           uses: docker/setup-qemu-action@29109295f81e9208d7d86ff1c6c12d2833863392 # v1
       -
           name: Set up Docker Buildx
-          uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v1
+          uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v1
       -
           name: Login to Docker Hub
           if: github.event_name != 'pull_request'

.github/workflows/container_release2.yml and container_release3.yml (vendored, 2 changed lines each) carry the identical hunk at @@ -38,7 +38,7 @@ jobs:; container_release4.yml and container_release5.yml carry it at @@ -37,7 +37,7 @@ jobs:.
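Pinning actions to a full commit SHA (rather than a mutable tag such as v1) is what makes a bump like this an explicit, reviewable change. A minimal sketch of verifying the new SHA upstream before committing the bump, assuming an authenticated GitHub CLI:

```sh
# Resolve the pinned commit upstream and print its SHA plus the first line
# of its commit message, so the bump can be checked against the action's history.
gh api repos/docker/setup-buildx-action/commits/e468171a9de216ec08956ac3ada2f0791b6bd435 \
  --jq '.sha + " " + (.commit.message | split("\n")[0])'
```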
.github/workflows/deploy_telemetry.yml (vendored, new file, 171 lines)

@@ -0,0 +1,171 @@
# This workflow will build and deploy the SeaweedFS telemetry server
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go

name: Deploy Telemetry Server

on:
  workflow_dispatch:
    inputs:
      setup:
        description: 'Run first-time server setup'
        required: true
        type: boolean
        default: false
      deploy:
        description: 'Deploy telemetry server to remote server'
        required: true
        type: boolean
        default: false

jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4

      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version: '1.24'

      - name: Build Telemetry Server
        if: github.event_name == 'workflow_dispatch' && inputs.deploy
        run: |
          go mod tidy
          echo "Building telemetry server..."
          GOOS=linux GOARCH=amd64 go build -o telemetry-server ./telemetry/server/main.go
          ls -la telemetry-server
          echo "Build completed successfully"

      - name: First-time Server Setup
        if: github.event_name == 'workflow_dispatch' && inputs.setup
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
          REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
          REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "Host *" > ~/.ssh/config
          echo "  StrictHostKeyChecking no" >> ~/.ssh/config

          # Create all required directories with proper permissions
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            mkdir -p ~/seaweedfs-telemetry/bin ~/seaweedfs-telemetry/logs ~/seaweedfs-telemetry/data ~/seaweedfs-telemetry/tmp && \
            chmod 755 ~/seaweedfs-telemetry/logs && \
            chmod 755 ~/seaweedfs-telemetry/data && \
            touch ~/seaweedfs-telemetry/logs/telemetry.log ~/seaweedfs-telemetry/logs/telemetry.error.log && \
            chmod 644 ~/seaweedfs-telemetry/logs/*.log"

          # Create systemd service file
          echo "
          [Unit]
          Description=SeaweedFS Telemetry Server
          After=network.target

          [Service]
          Type=simple
          User=$REMOTE_USER
          WorkingDirectory=/home/$REMOTE_USER/seaweedfs-telemetry
          ExecStart=/home/$REMOTE_USER/seaweedfs-telemetry/bin/telemetry-server -port=8353
          Restart=always
          RestartSec=5
          StandardOutput=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.log
          StandardError=append:/home/$REMOTE_USER/seaweedfs-telemetry/logs/telemetry.error.log

          [Install]
          WantedBy=multi-user.target" > telemetry.service

          # Setup logrotate configuration
          echo "# SeaweedFS Telemetry service log rotation
          /home/$REMOTE_USER/seaweedfs-telemetry/logs/*.log {
              daily
              rotate 30
              compress
              delaycompress
              missingok
              notifempty
              create 644 $REMOTE_USER $REMOTE_USER
              postrotate
                  systemctl restart telemetry.service
              endscript
          }" > telemetry_logrotate

          # Copy configuration files
          scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/

          # Copy and install service and logrotate files
          scp -i ~/.ssh/deploy_key telemetry.service telemetry_logrotate $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            sudo mv ~/seaweedfs-telemetry/telemetry.service /etc/systemd/system/ && \
            sudo mv ~/seaweedfs-telemetry/telemetry_logrotate /etc/logrotate.d/seaweedfs-telemetry && \
            sudo systemctl daemon-reload && \
            sudo systemctl enable telemetry.service"

          echo "✅ First-time setup completed successfully!"
          echo "📋 Next step: Run the deployment to install the telemetry server binary"
          echo "   1. Go to GitHub Actions → Deploy Telemetry Server"
          echo "   2. Click 'Run workflow'"
          echo "   3. Check 'Deploy telemetry server to remote server'"
          echo "   4. Click 'Run workflow'"

          rm -f ~/.ssh/deploy_key

      - name: Deploy Telemetry Server to Remote Server
        if: github.event_name == 'workflow_dispatch' && inputs.deploy
        env:
          SSH_PRIVATE_KEY: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
          REMOTE_HOST: ${{ secrets.TELEMETRY_HOST }}
          REMOTE_USER: ${{ secrets.TELEMETRY_USER }}
        run: |
          mkdir -p ~/.ssh
          echo "$SSH_PRIVATE_KEY" > ~/.ssh/deploy_key
          chmod 600 ~/.ssh/deploy_key
          echo "Host *" > ~/.ssh/config
          echo "  StrictHostKeyChecking no" >> ~/.ssh/config

          # Create temp directory and copy binary
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "mkdir -p ~/seaweedfs-telemetry/tmp"
          scp -i ~/.ssh/deploy_key telemetry-server $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/tmp/

          # Copy updated configuration files
          scp -i ~/.ssh/deploy_key telemetry/grafana-dashboard.json $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/
          scp -i ~/.ssh/deploy_key telemetry/prometheus.yml $REMOTE_USER@$REMOTE_HOST:~/seaweedfs-telemetry/

          # Check if service exists and deploy accordingly
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            if systemctl list-unit-files telemetry.service >/dev/null 2>&1; then
              echo 'Service exists, performing update...'
              sudo systemctl stop telemetry.service
              mkdir -p ~/seaweedfs-telemetry/bin
              mv ~/seaweedfs-telemetry/tmp/telemetry-server ~/seaweedfs-telemetry/bin/
              chmod +x ~/seaweedfs-telemetry/bin/telemetry-server
              sudo systemctl start telemetry.service
              sudo systemctl status telemetry.service
            else
              echo 'ERROR: telemetry.service not found!'
              echo 'Please run the first-time setup before deploying.'
              echo 'Go to GitHub Actions → Deploy Telemetry Server → Run workflow → Check \"Run first-time server setup\"'
              exit 1
            fi"

          # Verify deployment
          ssh -i ~/.ssh/deploy_key $REMOTE_USER@$REMOTE_HOST "
            echo 'Waiting for service to start...'
            sleep 5
            curl -f http://localhost:8353/health || echo 'Health check failed'"

          rm -f ~/.ssh/deploy_key

      - name: Notify Deployment Status
        if: always()
        run: |
          if [ "${{ job.status }}" == "success" ]; then
            echo "✅ Telemetry server deployment successful"
            echo "Dashboard: http://${{ secrets.TELEMETRY_HOST }}:8353"
            echo "Metrics: http://${{ secrets.TELEMETRY_HOST }}:8353/metrics"
          else
            echo "❌ Telemetry server deployment failed"
          fi
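Both the setup and deploy steps are gated on workflow_dispatch booleans, so nothing runs on push; the workflow has to be dispatched by hand. A sketch of driving it from the GitHub CLI instead of the web UI, assuming gh is authenticated against the repository:

```sh
# One-time host provisioning (directories, systemd unit, logrotate config).
gh workflow run "Deploy Telemetry Server" -f setup=true -f deploy=false

# Regular deployment: build the binary, push it to the host, restart the service.
gh workflow run "Deploy Telemetry Server" -f setup=false -f deploy=true

# Follow the most recent run.
gh run watch
```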
README.md (2 hunks): a new Enterprise section plus a matching table-of-contents entry.

@@ -73,6 +73,7 @@ Table of Contents
 * [Installation Guide](#installation-guide)
 * [Disk Related Topics](#disk-related-topics)
 * [Benchmark](#benchmark)
+* [Enterprise](#enterprise)
 * [License](#license)

 # Quick Start #

@@ -651,6 +652,13 @@ Total Errors:0.

 [Back to TOC](#table-of-contents)

+## Enterprise ##
+
+For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
+which has a self-healing storage format with better data protection.
+
+[Back to TOC](#table-of-contents)
+
 ## License ##

 Licensed under the Apache License, Version 2.0 (the "License");
go.mod (220 changed lines): routine dependency bumps, plus a handful of additions (asaskevich/govalidator, IBM/go-sdk-core, cloudsoda/sddl, go-openapi/*, go-playground/*, lanrat/extsort, leodido/go-urn, mitchellh/mapstructure, oklog/ulid, sigs.k8s.io/yaml) and removals (emersion/go-textwrapper, olekukonko/tablewriter, onsi/gomega).

@@ -5,9 +5,9 @@ go 1.24
 toolchain go1.24.1

 require (
-	cloud.google.com/go v0.121.0 // indirect
+	cloud.google.com/go v0.121.1 // indirect
 	cloud.google.com/go/pubsub v1.49.0
-	cloud.google.com/go/storage v1.54.0
+	cloud.google.com/go/storage v1.55.0
 	github.com/Azure/azure-pipeline-go v0.2.3
 	github.com/Azure/azure-storage-blob-go v0.15.0
 	github.com/Shopify/sarama v1.38.1

@@ -31,7 +31,7 @@ require (
 	github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 // indirect
 	github.com/fsnotify/fsnotify v1.8.0 // indirect
 	github.com/go-redsync/redsync/v4 v4.13.0
-	github.com/go-sql-driver/mysql v1.9.2
+	github.com/go-sql-driver/mysql v1.9.3
 	github.com/go-zookeeper/zk v1.0.3 // indirect
 	github.com/gocql/gocql v1.7.0
 	github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect

@@ -69,8 +69,8 @@ require (
 	github.com/posener/complete v1.2.3
 	github.com/pquerna/cachecontrol v0.2.0
 	github.com/prometheus/client_golang v1.22.0
-	github.com/prometheus/client_model v0.6.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
-	github.com/prometheus/common v0.62.0 // indirect
+	github.com/prometheus/common v0.64.0 // indirect
 	github.com/prometheus/procfs v0.16.1
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
 	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect

@@ -95,23 +95,23 @@ require (
 	github.com/xdg-go/stringprep v1.0.4 // indirect
 	github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
 	go.etcd.io/etcd/client/v3 v3.6.1
-	go.mongodb.org/mongo-driver v1.17.3
+	go.mongodb.org/mongo-driver v1.17.4
 	go.opencensus.io v0.24.0 // indirect
 	gocloud.dev v0.41.0
 	gocloud.dev/pubsub/natspubsub v0.41.0
 	gocloud.dev/pubsub/rabbitpubsub v0.41.0
-	golang.org/x/crypto v0.38.0
+	golang.org/x/crypto v0.39.0
-	golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
+	golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476
-	golang.org/x/image v0.27.0
+	golang.org/x/image v0.28.0
-	golang.org/x/net v0.40.0
+	golang.org/x/net v0.41.0
 	golang.org/x/oauth2 v0.30.0 // indirect
 	golang.org/x/sys v0.33.0
-	golang.org/x/text v0.25.0 // indirect
+	golang.org/x/text v0.26.0 // indirect
-	golang.org/x/tools v0.33.0
+	golang.org/x/tools v0.34.0
 	golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
-	google.golang.org/api v0.234.0
+	google.golang.org/api v0.238.0
 	google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
-	google.golang.org/grpc v1.72.1
+	google.golang.org/grpc v1.73.0
 	google.golang.org/protobuf v1.36.6
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	modernc.org/b v1.0.0 // indirect

@@ -125,126 +125,133 @@ require (
 	github.com/Jille/raft-grpc-transport v1.6.1
 	github.com/arangodb/go-driver v1.6.6
 	github.com/armon/go-metrics v0.4.1
-	github.com/aws/aws-sdk-go-v2 v1.36.3
+	github.com/aws/aws-sdk-go-v2 v1.36.5
-	github.com/aws/aws-sdk-go-v2/config v1.29.14
+	github.com/aws/aws-sdk-go-v2/config v1.29.17
-	github.com/aws/aws-sdk-go-v2/credentials v1.17.68
+	github.com/aws/aws-sdk-go-v2/credentials v1.17.70
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.80.1
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.81.0
 	github.com/cognusion/imaging v1.0.2
 	github.com/fluent/fluent-logger-golang v1.10.0
-	github.com/getsentry/sentry-go v0.31.1
+	github.com/getsentry/sentry-go v0.33.0
 	github.com/golang-jwt/jwt/v5 v5.2.2
 	github.com/google/flatbuffers/go v0.0.0-20230108230133-3b8644d32c50
-	github.com/hanwen/go-fuse/v2 v2.7.3-0.20250605191109-50f6569d1a7d
+	github.com/hanwen/go-fuse/v2 v2.8.0
 	github.com/hashicorp/raft v1.7.3
 	github.com/hashicorp/raft-boltdb/v2 v2.3.1
-	github.com/minio/crc64nvme v1.0.1
+	github.com/minio/crc64nvme v1.0.2
 	github.com/orcaman/concurrent-map/v2 v2.0.1
-	github.com/parquet-go/parquet-go v0.24.0
+	github.com/parquet-go/parquet-go v0.25.1
-	github.com/pkg/sftp v1.13.7
+	github.com/pkg/sftp v1.13.9
 	github.com/rabbitmq/amqp091-go v1.10.0
-	github.com/rclone/rclone v1.69.3
+	github.com/rclone/rclone v1.70.1
-	github.com/rdleal/intervalst v1.4.1
+	github.com/rdleal/intervalst v1.5.0
 	github.com/redis/go-redis/v9 v9.10.0
 	github.com/schollz/progressbar/v3 v3.18.0
 	github.com/shirou/gopsutil/v3 v3.24.5
 	github.com/tarantool/go-tarantool/v2 v2.3.2
 	github.com/tikv/client-go/v2 v2.0.7
 	github.com/ydb-platform/ydb-go-sdk-auth-environ v0.5.0
-	github.com/ydb-platform/ydb-go-sdk/v3 v3.108.3
+	github.com/ydb-platform/ydb-go-sdk/v3 v3.111.0
 	go.etcd.io/etcd/client/pkg/v3 v3.6.1
 	go.uber.org/atomic v1.11.0
-	golang.org/x/sync v0.14.0
+	golang.org/x/sync v0.15.0
 	google.golang.org/grpc/security/advancedtls v1.0.0
 )

 require (
-	cel.dev/expr v0.22.1 // indirect
+	cel.dev/expr v0.23.0 // indirect
-	cloud.google.com/go/auth v0.16.1 // indirect
+	cloud.google.com/go/auth v0.16.2 // indirect
 	cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
 	cloud.google.com/go/compute/metadata v0.7.0 // indirect
 	cloud.google.com/go/iam v1.5.2 // indirect
 	cloud.google.com/go/monitoring v1.24.2 // indirect
 	filippo.io/edwards25519 v1.1.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.17.1 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.2 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.4.0 // indirect
+	github.com/Azure/azure-sdk-for-go/sdk/storage/azfile v1.5.1 // indirect
 	github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
 	github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
-	github.com/Files-com/files-sdk-go/v3 v3.2.97 // indirect
+	github.com/Files-com/files-sdk-go/v3 v3.2.173 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
 	github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
+	github.com/IBM/go-sdk-core/v5 v5.20.0 // indirect
 	github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd // indirect
 	github.com/Microsoft/go-winio v0.6.2 // indirect
 	github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
 	github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
-	github.com/ProtonMail/go-crypto v1.1.3 // indirect
+	github.com/ProtonMail/go-crypto v1.3.0 // indirect
 	github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
 	github.com/ProtonMail/go-srp v0.0.7 // indirect
-	github.com/ProtonMail/gopenpgp/v2 v2.7.4 // indirect
+	github.com/ProtonMail/gopenpgp/v2 v2.9.0 // indirect
-	github.com/PuerkitoBio/goquery v1.8.1 // indirect
+	github.com/PuerkitoBio/goquery v1.10.3 // indirect
 	github.com/abbot/go-http-auth v0.4.0 // indirect
 	github.com/andybalholm/brotli v1.1.0 // indirect
-	github.com/andybalholm/cascadia v1.3.2 // indirect
+	github.com/andybalholm/cascadia v1.3.3 // indirect
 	github.com/appscode/go-querystring v0.0.0-20170504095604-0126cfb3f1dc // indirect
 	github.com/arangodb/go-velocypack v0.0.0-20200318135517-5af53c29c67e // indirect
+	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.11 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.69 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.77 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.36 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.36 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.36 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.2 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.4 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.17 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sns v1.34.2 // indirect
 	github.com/aws/aws-sdk-go-v2/service/sqs v1.38.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect
-	github.com/aws/smithy-go v1.22.3 // indirect
+	github.com/aws/smithy-go v1.22.4 // indirect
 	github.com/boltdb/bolt v1.3.1 // indirect
-	github.com/bradenaw/juniper v0.15.2 // indirect
+	github.com/bradenaw/juniper v0.15.3 // indirect
 	github.com/bradfitz/iter v0.0.0-20191230175014-e8f45d346db8 // indirect
 	github.com/buengese/sgzip v0.1.1 // indirect
 	github.com/calebcase/tmpfile v1.0.3 // indirect
 	github.com/chilts/sid v0.0.0-20190607042430-660e94789ec9 // indirect
 	github.com/cloudflare/circl v1.6.1 // indirect
-	github.com/cloudinary/cloudinary-go/v2 v2.9.0 // indirect
+	github.com/cloudinary/cloudinary-go/v2 v2.10.0 // indirect
-	github.com/cloudsoda/go-smb2 v0.0.0-20241223203758-52b943b88fd6 // indirect
+	github.com/cloudsoda/go-smb2 v0.0.0-20250228001242-d4c70e6251cc // indirect
+	github.com/cloudsoda/sddl v0.0.0-20250224235906-926454e91efc // indirect
 	github.com/cncf/xds/go v0.0.0-20250326154945-ae57f3c0d45f // indirect
 	github.com/colinmarc/hdfs/v2 v2.4.0 // indirect
-	github.com/creasty/defaults v1.7.0 // indirect
+	github.com/creasty/defaults v1.8.0 // indirect
 	github.com/cronokirby/saferith v0.33.0 // indirect
 	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
 	github.com/d4l3k/messagediff v1.2.1 // indirect
 	github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect
 	github.com/dropbox/dropbox-sdk-go-unofficial/v6 v6.0.5 // indirect
-	github.com/ebitengine/purego v0.8.3 // indirect
+	github.com/ebitengine/purego v0.8.4 // indirect
 	github.com/elastic/gosigar v0.14.2 // indirect
-	github.com/emersion/go-message v0.18.0 // indirect
+	github.com/emersion/go-message v0.18.2 // indirect
-	github.com/emersion/go-textwrapper v0.0.0-20200911093747-65d896831594 // indirect
-	github.com/emersion/go-vcard v0.0.0-20230815062825-8fda7d206ec9 // indirect
+	github.com/emersion/go-vcard v0.0.0-20241024213814-c9703dde27ff // indirect
 	github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
 	github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
 	github.com/fatih/color v1.16.0 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/flynn/noise v1.0.1 // indirect
+	github.com/flynn/noise v1.1.0 // indirect
-	github.com/gabriel-vasile/mimetype v1.4.7 // indirect
+	github.com/gabriel-vasile/mimetype v1.4.9 // indirect
-	github.com/geoffgarside/ber v1.1.0 // indirect
+	github.com/geoffgarside/ber v1.2.0 // indirect
-	github.com/go-chi/chi/v5 v5.1.0 // indirect
+	github.com/go-chi/chi/v5 v5.2.2 // indirect
 	github.com/go-darwin/apfs v0.0.0-20211011131704-f84b94dbf348 // indirect
 	github.com/go-jose/go-jose/v4 v4.0.5 // indirect
-	github.com/go-logr/logr v1.4.2 // indirect
+	github.com/go-logr/logr v1.4.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-ole/go-ole v1.3.0 // indirect
+	github.com/go-openapi/errors v0.22.1 // indirect
+	github.com/go-openapi/strfmt v0.23.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.26.0 // indirect
-	github.com/go-resty/resty/v2 v2.11.0 // indirect
+	github.com/go-resty/resty/v2 v2.16.5 // indirect
-	github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
+	github.com/go-viper/mapstructure/v2 v2.3.0 // indirect
-	github.com/gofrs/flock v0.8.1 // indirect
+	github.com/gofrs/flock v0.12.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
 	github.com/google/s2a-go v0.1.9 // indirect

@@ -270,33 +277,35 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jtolio/noiseconn v0.0.0-20231127013910-f6d9ecbf1de7 // indirect
 	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004 // indirect
-	github.com/klauspost/cpuid/v2 v2.2.9 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.10 // indirect
 	github.com/koofr/go-httpclient v0.0.0-20240520111329-e20f8f203988 // indirect
 	github.com/koofr/go-koofrclient v0.0.0-20221207135200-cbd7fc9ad6a6 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
+	github.com/lanrat/extsort v1.0.2 // indirect
+	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/lpar/date v1.0.0 // indirect
-	github.com/lufia/plan9stats v0.0.0-20231016141302-07b5767bb0ed // indirect
+	github.com/lufia/plan9stats v0.0.0-20250317134145-8bc96cf8fc35 // indirect
-	github.com/mattn/go-colorable v0.1.13 // indirect
+	github.com/mattn/go-colorable v0.1.14 // indirect
 	github.com/mattn/go-runewidth v0.0.16 // indirect
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/montanaflynn/stats v0.7.1 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
 	github.com/nats-io/nats.go v1.40.1 // indirect
 	github.com/nats-io/nkeys v0.4.10 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/ncruces/go-strftime v0.1.9 // indirect
-	github.com/ncw/swift/v2 v2.0.3 // indirect
+	github.com/ncw/swift/v2 v2.0.4 // indirect
 	github.com/nxadm/tail v1.4.11 // indirect
-	github.com/olekukonko/tablewriter v0.0.5 // indirect
+	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/onsi/ginkgo/v2 v2.19.0 // indirect
+	github.com/onsi/ginkgo/v2 v2.23.3 // indirect
-	github.com/onsi/gomega v1.34.1 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
-	github.com/oracle/oci-go-sdk/v65 v65.80.0 // indirect
+	github.com/oracle/oci-go-sdk/v65 v65.93.0 // indirect
-	github.com/panjf2000/ants/v2 v2.9.1 // indirect
+	github.com/panjf2000/ants/v2 v2.11.3 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
-	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
+	github.com/pelletier/go-toml/v2 v2.2.4 // indirect
 	github.com/pengsrc/go-shared v0.2.1-0.20190131101655-1999055a4a14 // indirect
 	github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
 	github.com/pierrec/lz4/v4 v4.1.21 // indirect

@@ -307,31 +316,31 @@ require (
 	github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
 	github.com/pkg/xattr v0.4.10 // indirect
 	github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
-	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
+	github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
 	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 // indirect
-	github.com/relvacode/iso8601 v1.3.0 // indirect
+	github.com/relvacode/iso8601 v1.6.0 // indirect
 	github.com/rfjakob/eme v1.1.2 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect
 	github.com/sagikazarmark/locafero v0.7.0 // indirect
-	github.com/samber/lo v1.47.0 // indirect
+	github.com/samber/lo v1.50.0 // indirect
-	github.com/shirou/gopsutil/v4 v4.24.12 // indirect
+	github.com/shirou/gopsutil/v4 v4.25.5 // indirect
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/smartystreets/goconvey v1.8.1 // indirect
-	github.com/sony/gobreaker v0.5.0 // indirect
+	github.com/sony/gobreaker v1.0.0 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
-	github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
+	github.com/spacemonkeygo/monkit/v3 v3.0.24 // indirect
 	github.com/spf13/pflag v1.0.6 // indirect
 	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/t3rm1n4l/go-mega v0.0.0-20241213150454-ec0027fb0002 // indirect
+	github.com/t3rm1n4l/go-mega v0.0.0-20241213151442-a19cff0ec7b5 // indirect
 	github.com/tarantool/go-iproto v1.1.0 // indirect
 	github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a // indirect
 	github.com/tikv/pd/client v0.0.0-20230329114254-1948c247c2b1 // indirect
 	github.com/tinylib/msgp v1.3.0 // indirect
-	github.com/tklauser/go-sysconf v0.3.13 // indirect
+	github.com/tklauser/go-sysconf v0.3.15 // indirect
-	github.com/tklauser/numcpus v0.7.0 // indirect
+	github.com/tklauser/numcpus v0.10.0 // indirect
 	github.com/twmb/murmur3 v1.1.3 // indirect
 	github.com/unknwon/goconfig v1.0.0 // indirect
 	github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect

@@ -343,36 +352,37 @@ require (
 	github.com/ydb-platform/ydb-go-yc-metadata v0.6.1 // indirect
 	github.com/yunify/qingstor-sdk-go/v3 v3.2.0 // indirect
 	github.com/yusufpapurcu/wmi v1.2.4 // indirect
-	github.com/zeebo/blake3 v0.2.3 // indirect
+	github.com/zeebo/blake3 v0.2.4 // indirect
 	github.com/zeebo/errs v1.4.0 // indirect
-	go.etcd.io/bbolt v1.3.10 // indirect
+	go.etcd.io/bbolt v1.4.0 // indirect
 	go.etcd.io/etcd/api/v3 v3.6.1 // indirect
 	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
-	go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
+	go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
-	go.opentelemetry.io/otel v1.35.0 // indirect
+	go.opentelemetry.io/otel v1.36.0 // indirect
-	go.opentelemetry.io/otel/metric v1.35.0 // indirect
+	go.opentelemetry.io/otel/metric v1.36.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.35.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.36.0 // indirect
-	go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
+	go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
-	go.opentelemetry.io/otel/trace v1.35.0 // indirect
+	go.opentelemetry.io/otel/trace v1.36.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
 	golang.org/x/term v0.32.0 // indirect
-	golang.org/x/time v0.11.0 // indirect
+	golang.org/x/time v0.12.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20250505200425-f936aa4a68b2 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250512202823-5a2f75b736a9 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
 	gopkg.in/validator.v2 v2.0.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	modernc.org/libc v1.65.10 // indirect
 	moul.io/http2curl/v2 v2.3.0 // indirect
+	sigs.k8s.io/yaml v1.4.0 // indirect
-	storj.io/common v0.0.0-20240812101423-26b53789c348 // indirect
+	storj.io/common v0.0.0-20250605163628-70ca83b6228e // indirect
-	storj.io/drpc v0.0.35-0.20240709171858-0075ac871661 // indirect
+	storj.io/drpc v0.0.35-0.20250513201419-f7819ea69b55 // indirect
-	storj.io/eventkit v0.0.0-20240415002644-1d9596fee086 // indirect
+	storj.io/eventkit v0.0.0-20250410172343-61f26d3de156 // indirect
 	storj.io/infectious v0.0.2 // indirect
-	storj.io/picobuf v0.0.3 // indirect
+	storj.io/picobuf v0.0.4 // indirect
 	storj.io/uplink v1.13.1 // indirect
 )
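A bulk bump like this is usually mechanical. A sketch of reproducing it locally, using only standard Go toolchain commands:

```sh
# Update every dependency to its latest minor/patch release,
# prune the module graph, then make sure everything still compiles.
go get -u ./...
go mod tidy
go build ./...
```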
Chart.yaml (seaweedfs Helm chart): appVersion and chart version bumped together.

@@ -1,6 +1,6 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "3.91"
+appVersion: "3.92"
 # Dev note: Trigger a helm chart release by `git tag -a helm-<version>`
-version: 4.0.391
+version: 4.0.392
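Per the dev note inside the chart, the chart release is cut by pushing an annotated tag. For this version that would look like:

```sh
# Tag name follows the helm-<version> convention from the Chart.yaml comment.
git tag -a helm-4.0.392 -m "seaweedfs helm chart 4.0.392"
git push origin helm-4.0.392
```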
Helm chart README (1 hunk): the same Enterprise section added to the main README is appended after the s3 config example.

@@ -144,3 +144,8 @@ stringData:
   # this key must be an inline json config file
   seaweedfs_s3_config: '{"identities":[{"name":"anvAdmin","credentials":[{"accessKey":"snu8yoP6QAlY0ne4","secretKey":"PNzBcmeLNEdR0oviwm04NQAicOrDH1Km"}],"actions":["Admin","Read","Write"]},{"name":"anvReadOnly","credentials":[{"accessKey":"SCigFee6c5lbi04A","secretKey":"kgFhbT38R8WUYVtiFQ1OiSVOrYr3NKku"}],"actions":["Read"]}]}'
 ```
+
+## Enterprise
+
+For enterprise users, please visit [seaweedfs.com](https://seaweedfs.com) for the SeaweedFS Enterprise Edition,
+which has a self-healing storage format with better data protection.
Filer statefulset template (3 hunks): a new -metricsIp flag, an explicit -ip.bind, and pass-through of filer.extraArgs, which also requires the -master line to end with a continuation backslash.

@@ -162,6 +162,9 @@ spec:
         {{- if .Values.filer.metricsPort }}
         -metricsPort={{ .Values.filer.metricsPort }} \
         {{- end }}
+        {{- if .Values.filer.metricsIp }}
+        -metricsIp={{ .Values.filer.metricsIp }} \
+        {{- end }}
         {{- if .Values.filer.redirectOnRead }}
         -redirectOnRead \
         {{- end }}

@@ -187,6 +190,7 @@ spec:
         -encryptVolumeData \
         {{- end }}
         -ip=${POD_IP} \
+        -ip.bind={{ .Values.filer.ipBind }} \
         {{- if .Values.filer.filerGroup}}
         -filerGroup={{ .Values.filer.filerGroup}} \
         {{- end }}

@@ -219,7 +223,10 @@ spec:
         -s3.auditLogConfig=/etc/sw/filer_s3_auditLogConfig.json \
         {{- end }}
         {{- end }}
-        -master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
+        -master={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
+        {{- range .Values.filer.extraArgs }}
+        {{ . }} \
+        {{- end }}
         volumeMounts:
         {{- if (or (eq .Values.filer.logs.type "hostPath") (eq .Values.filer.logs.type "persistentVolumeClaim") (eq .Values.filer.logs.type "emptyDir")) }}
         - name: seaweedfs-filer-log-volume
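To see what these template changes actually render to, helm template works without a cluster. A sketch, where the chart path and the template file name are assumptions about the repository layout:

```sh
# Render only the filer statefulset with the new value set,
# then inspect the generated command-line flags.
helm template seaweedfs ./k8s/charts/seaweedfs \
  --set filer.metricsIp=0.0.0.0 \
  --show-only templates/filer-statefulset.yaml \
  | grep -E 'metricsIp|ip.bind'
```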
Master statefulset template (1 hunk): adds -metricsIp, drops the -pulseSeconds flag in favor of Raft-related options (-resumeState, -raftHashicorp, -raftBootstrap, -electionTimeout, -heartbeatInterval), and passes through master.extraArgs, so -peers now ends with a continuation backslash.

@@ -157,18 +157,36 @@ spec:
         {{- if .Values.master.metricsPort }}
         -metricsPort={{ .Values.master.metricsPort }} \
         {{- end }}
+        {{- if .Values.master.metricsIp }}
+        -metricsIp={{ .Values.master.metricsIp }} \
+        {{- end }}
         -volumeSizeLimitMB={{ .Values.master.volumeSizeLimitMB }} \
         {{- if .Values.master.disableHttp }}
         -disableHttp \
         {{- end }}
-        {{- if .Values.master.pulseSeconds }}
-        -pulseSeconds={{ .Values.master.pulseSeconds }} \
+        {{- if .Values.master.resumeState }}
+        -resumeState \
+        {{- end }}
+        {{- if .Values.master.raftHashicorp }}
+        -raftHashicorp \
+        {{- end }}
+        {{- if .Values.master.raftBootstrap }}
+        -raftBootstrap \
+        {{- end }}
+        {{- if .Values.master.electionTimeout }}
+        -electionTimeout={{ .Values.master.electionTimeout }} \
+        {{- end }}
+        {{- if .Values.master.heartbeatInterval }}
+        -heartbeatInterval={{ .Values.master.heartbeatInterval }} \
         {{- end }}
         {{- if .Values.master.garbageThreshold }}
         -garbageThreshold={{ .Values.master.garbageThreshold }} \
         {{- end }}
         -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-master.{{ .Release.Namespace }} \
-        -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}
+        -peers={{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }} \
+        {{- range .Values.master.extraArgs }}
+        {{ . }} \
+        {{- end }}
         volumeMounts:
         - name : data-{{ .Release.Namespace }}
           mountPath: /data
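With all the new toggles enabled, the tail of the rendered weed master invocation would look roughly like the fragment below. This is an illustrative sketch: the flags are the ones wired up by the template above, but the hostnames and replica count are invented for the example.

```sh
# Illustrative rendered fragment for a hypothetical 3-replica master
# in the "default" namespace with the new values set.
weed master -volumeSizeLimitMB=1000 \
  -resumeState \
  -raftHashicorp \
  -raftBootstrap \
  -electionTimeout=10s \
  -heartbeatInterval=300ms \
  -ip=seaweedfs-master-0.seaweedfs-master.default \
  -peers=seaweedfs-master-0.seaweedfs-master.default:9333,seaweedfs-master-1.seaweedfs-master.default:9333,seaweedfs-master-2.seaweedfs-master.default:9333
```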
Volume statefulset template (2 hunks): the same pattern as the filer — a new -metricsIp flag and pass-through of volume.extraArgs, with -mserver gaining the continuation backslash.

@@ -150,6 +150,9 @@ spec:
         {{- if .Values.volume.metricsPort }}
         -metricsPort={{ .Values.volume.metricsPort }} \
         {{- end }}
+        {{- if .Values.volume.metricsIp }}
+        -metricsIp={{ .Values.volume.metricsIp }} \
+        {{- end }}
         -dir {{range $index, $dir := .Values.volume.dataDirs }}{{if ne $index 0}},{{end}}/{{$dir.name}}{{end}} \
         {{- if .Values.volume.idx }}
         -dir.idx=/idx \

@@ -183,7 +186,10 @@ spec:
         -minFreeSpacePercent={{ .Values.volume.minFreeSpacePercent }} \
         -ip=${POD_NAME}.${SEAWEEDFS_FULLNAME}-volume.{{ .Release.Namespace }} \
         -compactionMBps={{ .Values.volume.compactionMBps }} \
-        -mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }}
+        -mserver={{ if .Values.global.masterServer }}{{.Values.global.masterServer}}{{ else }}{{ range $index := until (.Values.master.replicas | int) }}${SEAWEEDFS_FULLNAME}-master-{{ $index }}.${SEAWEEDFS_FULLNAME}-master.{{ $.Release.Namespace }}:{{ $.Values.master.port }}{{ if lt $index (sub ($.Values.master.replicas | int) 1) }},{{ end }}{{ end }}{{ end }} \
+        {{- range .Values.volume.extraArgs }}
+        {{ . }} \
+        {{- end }}
         volumeMounts:
         {{- range $dir := .Values.volume.dataDirs }}
         {{- if not ( eq $dir.type "custom" ) }}
@ -56,12 +56,11 @@ master:
|
||||||
port: 9333
|
port: 9333
|
||||||
grpcPort: 19333
|
grpcPort: 19333
|
||||||
metricsPort: 9327
|
metricsPort: 9327
|
||||||
|
metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
|
||||||
ipBind: "0.0.0.0"
|
ipBind: "0.0.0.0"
|
||||||
volumePreallocate: false
|
volumePreallocate: false
|
||||||
volumeSizeLimitMB: 1000
|
volumeSizeLimitMB: 1000
|
||||||
loggingOverrideLevel: null
|
loggingOverrideLevel: null
|
||||||
# number of seconds between heartbeats, default 5
|
|
||||||
pulseSeconds: null
|
|
||||||
# threshold to vacuum and reclaim spaces, default 0.3 (30%)
|
# threshold to vacuum and reclaim spaces, default 0.3 (30%)
|
||||||
garbageThreshold: null
|
garbageThreshold: null
|
||||||
# Prometheus push interval in seconds, default 15
|
# Prometheus push interval in seconds, default 15
|
||||||
|
@@ -75,6 +74,25 @@ master:
 # Disable http request, only gRpc operations are allowed
 disableHttp: false

+# Resume previous state on start master server
+resumeState: false
+# Use Hashicorp Raft
+raftHashicorp: false
+# Whether to bootstrap the Raft cluster. Only use it when using Hashicorp Raft
+raftBootstrap: false
+
+# election timeout of master servers
+electionTimeout: "10s"
+# heartbeat interval of master servers; randomly multiplied by a factor in [1, 1.25)
+heartbeatInterval: "300ms"
+
+# Custom command line arguments to add to the master command
+# Example to fix IPv6 metrics connectivity issues:
+# extraArgs: ["-metricsIp", "0.0.0.0"]
+# Example with multiple args:
+# extraArgs: ["-customFlag", "value", "-anotherFlag"]
+extraArgs: []
+
 config: |-
   # Enter any extra configuration for master.toml here.
   # It may be a multi-line string.
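The same override can also be supplied at install time without editing values.yaml. A hypothetical invocation (release name and chart path are placeholders):

```bash
helm upgrade --install seaweedfs ./seaweedfs-helm-chart \
  --set master.extraArgs='{-metricsIp,0.0.0.0}'
```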
@@ -277,6 +295,7 @@ volume:
 port: 8080
 grpcPort: 18080
 metricsPort: 9327
+metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
 ipBind: "0.0.0.0"
 replicas: 1
 loggingOverrideLevel: null
@@ -289,6 +308,13 @@ volume:
 # minimum free disk space (in percent). If free disk space drops below this value, all volumes are marked read-only
 minFreeSpacePercent: 7

+# Custom command line arguments to add to the volume command
+# Example to fix IPv6 metrics connectivity issues:
+# extraArgs: ["-metricsIp", "0.0.0.0"]
+# Example with multiple args:
+# extraArgs: ["-customFlag", "value", "-anotherFlag"]
+extraArgs: []
+
 # For each data disk you may use ANY storage-class, example with local-path-provisioner
 # Annotations are optional.
 # dataDirs:
@@ -520,6 +546,8 @@ filer:
 port: 8888
 grpcPort: 18888
 metricsPort: 9327
+metricsIp: "" # Metrics listen IP. If empty, defaults to ipBind
+ipBind: "0.0.0.0" # IP address to bind to. Set to 0.0.0.0 to allow external traffic
 loggingOverrideLevel: null
 filerGroup: ""
 # prefer to read and write to volumes in this data center (not set by default)
@@ -547,6 +575,13 @@ filer:
 # Disable http request, only gRpc operations are allowed
 disableHttp: false

+# Custom command line arguments to add to the filer command
+# Example to fix IPv6 metrics connectivity issues:
+# extraArgs: ["-metricsIp", "0.0.0.0"]
+# Example with multiple args:
+# extraArgs: ["-customFlag", "value", "-anotherFlag"]
+extraArgs: []
+
 # Add a custom notification.toml to configure filer notifications
 # Example:
 # notificationConfig: |-
telemetry/DEPLOYMENT.md (new file, 271 lines)
@@ -0,0 +1,271 @@

# SeaweedFS Telemetry Server Deployment

This document describes how to deploy the SeaweedFS telemetry server to a remote server using GitHub Actions.

## Prerequisites

1. A remote Linux server with:
   - SSH access
   - systemd (for service management)
   - Optional: Prometheus and Grafana (for monitoring)

2. GitHub repository secrets configured (see [Setup GitHub Secrets](#setup-github-secrets) below):
   - `TELEMETRY_SSH_PRIVATE_KEY`: SSH private key for accessing the remote server
   - `TELEMETRY_HOST`: Remote server hostname or IP address
   - `TELEMETRY_USER`: Username for SSH access

## Setup GitHub Secrets

Before using the deployment workflow, you need to configure the required secrets in your GitHub repository.

### Step 1: Generate SSH Key Pair

On your local machine, generate a new SSH key pair specifically for deployment:

```bash
# Generate a new SSH key pair
ssh-keygen -t ed25519 -C "seaweedfs-telemetry-deploy" -f ~/.ssh/seaweedfs_telemetry_deploy

# This creates two files:
# ~/.ssh/seaweedfs_telemetry_deploy (private key)
# ~/.ssh/seaweedfs_telemetry_deploy.pub (public key)
```

### Step 2: Configure Remote Server

Copy the public key to your remote server:

```bash
# Copy public key to remote server
ssh-copy-id -i ~/.ssh/seaweedfs_telemetry_deploy.pub user@your-server.com

# Or manually append to authorized_keys
cat ~/.ssh/seaweedfs_telemetry_deploy.pub | ssh user@your-server.com "mkdir -p ~/.ssh && cat >> ~/.ssh/authorized_keys"
```

Test the SSH connection:

```bash
# Test SSH connection with the new key
ssh -i ~/.ssh/seaweedfs_telemetry_deploy user@your-server.com "echo 'SSH connection successful'"
```

### Step 3: Add Secrets to GitHub Repository

1. Go to your GitHub repository
2. Click on **Settings** tab
3. In the sidebar, click **Secrets and variables** → **Actions**
4. Click **New repository secret** for each of the following:

#### TELEMETRY_SSH_PRIVATE_KEY

```bash
# Display the private key content
cat ~/.ssh/seaweedfs_telemetry_deploy
```

- **Name**: `TELEMETRY_SSH_PRIVATE_KEY`
- **Value**: Copy the entire private key content, including the `-----BEGIN OPENSSH PRIVATE KEY-----` and `-----END OPENSSH PRIVATE KEY-----` lines

#### TELEMETRY_HOST

- **Name**: `TELEMETRY_HOST`
- **Value**: Your server's hostname or IP address (e.g., `telemetry.example.com` or `192.168.1.100`)

#### TELEMETRY_USER

- **Name**: `TELEMETRY_USER`
- **Value**: The username on the remote server (e.g., `ubuntu`, `deploy`, or your username)
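These three secrets are what the deploy workflow reads at run time. A minimal sketch of how a job step might consume them (the step and the `appleboy/ssh-action` action are illustrative here, not necessarily what `deploy_telemetry.yml` actually uses):

```yaml
- name: Restart telemetry service over SSH
  uses: appleboy/ssh-action@v1
  with:
    host: ${{ secrets.TELEMETRY_HOST }}
    username: ${{ secrets.TELEMETRY_USER }}
    key: ${{ secrets.TELEMETRY_SSH_PRIVATE_KEY }}
    script: sudo systemctl restart telemetry.service
```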
### Step 4: Verify Configuration

Create a simple test workflow or manually trigger the deployment to verify that the secrets are working correctly.

### Security Best Practices

1. **Dedicated SSH Key**: Use a separate SSH key only for deployment
2. **Limited Permissions**: Create a dedicated user on the remote server with minimal required permissions
3. **Key Rotation**: Regularly rotate SSH keys
4. **Server Access**: Restrict SSH access to specific IP ranges if possible (see the sketch after this list)
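One way to implement the last item, assuming `ufw` is available on the server (the 203.0.113.0/24 range below is a documentation-address placeholder):

```bash
# Allow SSH only from a trusted network range, then enable the firewall
sudo ufw allow from 203.0.113.0/24 to any port 22 proto tcp
sudo ufw enable
```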
### Example Server Setup

If you're setting up a new server, here's a basic configuration:

```bash
# On the remote server, create a dedicated user for deployment
sudo useradd -m -s /bin/bash seaweedfs-deploy
sudo usermod -aG sudo seaweedfs-deploy  # Only if sudo access is needed

# Switch to the deployment user
sudo su - seaweedfs-deploy

# Create SSH directory
mkdir -p ~/.ssh
chmod 700 ~/.ssh

# Add your public key (paste the content of seaweedfs_telemetry_deploy.pub)
nano ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
```

### Troubleshooting

#### SSH Connection Issues

```bash
# Test SSH connection manually
ssh -i ~/.ssh/seaweedfs_telemetry_deploy -v user@your-server.com

# Check SSH key permissions
ls -la ~/.ssh/seaweedfs_telemetry_deploy*
# Should show: -rw------- for private key, -rw-r--r-- for public key
```

#### GitHub Actions Fails

1. **Check secrets**: Ensure all three secrets are properly set in GitHub
2. **Verify SSH key**: Make sure the entire private key (including headers/footers) is copied
3. **Test connectivity**: Manually SSH to the server from your local machine
4. **Check user permissions**: Ensure the remote user has necessary permissions

## GitHub Actions Workflow

The deployment workflow (`.github/workflows/deploy_telemetry.yml`) provides two main operations:

### 1. First-time Setup

Run this once to set up the remote server:

1. Go to GitHub Actions in your repository
2. Select "Deploy Telemetry Server" workflow
3. Click "Run workflow"
4. Check "Run first-time server setup"
5. Click "Run workflow"

This will:

- Create necessary directories on the remote server
- Set up systemd service configuration
- Configure log rotation
- Upload Grafana dashboard and Prometheus configuration
- Enable the telemetry service (but not start it yet)

**Note**: The setup only prepares the infrastructure. You need to run a deployment afterward to install and start the telemetry server.

### 2. Deploy Updates

To deploy updates, manually trigger the deployment:

1. Go to GitHub Actions in your repository
2. Select "Deploy Telemetry Server" workflow
3. Click "Run workflow"
4. Check "Deploy telemetry server to remote server"
5. Click "Run workflow"

## Server Directory Structure

After setup, the remote server will have:

```
~/seaweedfs-telemetry/
├── bin/
│   └── telemetry-server        # Binary executable
├── logs/
│   ├── telemetry.log           # Application logs
│   └── telemetry.error.log     # Error logs
├── data/                       # Data directory (if needed)
├── grafana-dashboard.json      # Grafana dashboard configuration
└── prometheus.yml              # Prometheus configuration
```

## Service Management

The telemetry server runs as a systemd service:

```bash
# Check service status
sudo systemctl status telemetry.service

# View logs
sudo journalctl -u telemetry.service -f

# Restart service
sudo systemctl restart telemetry.service

# Stop/start service
sudo systemctl stop telemetry.service
sudo systemctl start telemetry.service
```

## Accessing the Service

After deployment, the telemetry server will be available at:

- **Dashboard**: `http://your-server:8353`
- **API**: `http://your-server:8353/api/*`
- **Metrics**: `http://your-server:8353/metrics`
- **Health Check**: `http://your-server:8353/health`

## Optional: Prometheus and Grafana Integration

### Prometheus Setup

1. Install Prometheus on your server
2. Update `/etc/prometheus/prometheus.yml` to include:

```yaml
scrape_configs:
  - job_name: 'seaweedfs-telemetry'
    static_configs:
      - targets: ['localhost:8353']
    metrics_path: '/metrics'
```

### Grafana Setup

1. Install Grafana on your server
2. Import the dashboard from `~/seaweedfs-telemetry/grafana-dashboard.json`
3. Configure Prometheus as a data source pointing to your Prometheus instance

## Troubleshooting

### Deployment Fails

1. Check GitHub Actions logs for detailed error messages
2. Verify SSH connectivity: `ssh user@host`
3. Ensure all required secrets are configured in GitHub

### Service Won't Start

1. Check service logs: `sudo journalctl -u telemetry.service`
2. Verify binary permissions: `ls -la ~/seaweedfs-telemetry/bin/`
3. Test binary manually: `~/seaweedfs-telemetry/bin/telemetry-server -help`

### Port Conflicts

If port 8353 is already in use:

1. Edit the systemd service: `sudo systemctl edit telemetry.service`
2. Add override configuration:

```ini
[Service]
ExecStart=
ExecStart=/home/user/seaweedfs-telemetry/bin/telemetry-server -port=8354
```

3. Reload and restart: `sudo systemctl daemon-reload && sudo systemctl restart telemetry.service`

## Security Considerations

1. **Firewall**: Consider restricting access to telemetry ports
2. **SSH Keys**: Use dedicated SSH keys with minimal permissions
3. **User Permissions**: Run the service as a non-privileged user
4. **Network**: Consider running on internal networks only

## Monitoring

Monitor the deployment and service health:

- **GitHub Actions**: Check workflow runs for deployment status
- **System Logs**: `sudo journalctl -u telemetry.service`
- **Application Logs**: `tail -f ~/seaweedfs-telemetry/logs/telemetry.log`
- **Health Endpoint**: `curl http://localhost:8353/health`
- **Metrics**: `curl http://localhost:8353/metrics`
telemetry/README.md (new file, 353 lines)
@@ -0,0 +1,353 @@

# SeaweedFS Telemetry System

A privacy-respecting telemetry system for SeaweedFS that collects cluster-level usage statistics and provides visualization through Prometheus and Grafana.

## Features

- **Privacy-First Design**: Uses in-memory cluster IDs (regenerated on restart), no personal data collection
- **Prometheus Integration**: Native Prometheus metrics for monitoring and alerting
- **Grafana Dashboards**: Pre-built dashboards for data visualization
- **Protocol Buffers**: Efficient binary data transmission for optimal performance
- **Opt-in Only**: Disabled by default, requires explicit configuration
- **Docker Compose**: Complete monitoring stack deployment
- **Automatic Cleanup**: Configurable data retention policies

## Architecture

```
SeaweedFS Cluster → Telemetry Client → Telemetry Server → Prometheus → Grafana
                      (protobuf)          (metrics)         (queries)
```

## Data Transmission

The telemetry system uses **Protocol Buffers exclusively** for efficient binary data transmission:

- **Compact Format**: 30-50% smaller than JSON
- **Fast Serialization**: Better performance than text-based formats
- **Type Safety**: Strong typing with generated Go structs
- **Schema Evolution**: Built-in versioning support

### Protobuf Schema

```protobuf
message TelemetryData {
  string cluster_id = 1;          // In-memory generated UUID
  string version = 2;             // SeaweedFS version
  string os = 3;                  // Operating system
  // Field 4 reserved (was features)
  // Field 5 reserved (was deployment)
  int32 volume_server_count = 6;  // Number of volume servers
  uint64 total_disk_bytes = 7;    // Total disk usage
  int32 total_volume_count = 8;   // Total volume count
  int32 filer_count = 9;          // Number of filer servers
  int32 broker_count = 10;        // Number of broker servers
  int64 timestamp = 11;           // Collection timestamp
}
```
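For illustration, a client can populate this message, wrap it in the `TelemetryRequest` envelope defined alongside it, and post it to the collection endpoint described below. This is a minimal sketch, not SeaweedFS's built-in reporter: the generated-package import path is assumed, and the URL is the local default from the Quick Start.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"

	"google.golang.org/protobuf/proto"

	// Assumed import path for the generated telemetry package.
	pb "github.com/seaweedfs/seaweedfs/telemetry/proto"
)

func main() {
	// Wrap cluster-level stats in the TelemetryRequest envelope the server expects.
	req := &pb.TelemetryRequest{
		Data: &pb.TelemetryData{
			ClusterId:         "a1b2c3d4-example",
			Version:           "3.45",
			Os:                "linux/amd64",
			VolumeServerCount: 5,
			TotalDiskBytes:    1073741824,
			TotalVolumeCount:  120,
			FilerCount:        2,
			BrokerCount:       1,
			Timestamp:         time.Now().Unix(),
		},
	}

	payload, err := proto.Marshal(req)
	if err != nil {
		panic(err)
	}

	// The endpoint accepts protobuf only, per the API section below.
	resp, err := http.Post("http://localhost:8080/api/collect",
		"application/x-protobuf", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println("server responded:", resp.Status)
}
```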
## Privacy Approach

- **No Personal Data**: No hostnames, IP addresses, or user information
- **In-Memory IDs**: Cluster IDs are generated in-memory and change on restart
- **Aggregated Data**: Only cluster-level statistics, no individual file/user data
- **Opt-in Only**: Telemetry is disabled by default
- **Transparent**: Open source implementation, clear data collection policy

## Collected Data

| Field | Description | Example |
|-------|-------------|---------|
| `cluster_id` | In-memory UUID (changes on restart) | `a1b2c3d4-...` |
| `version` | SeaweedFS version | `3.45` |
| `os` | Operating system and architecture | `linux/amd64` |
| `volume_server_count` | Number of volume servers | `5` |
| `total_disk_bytes` | Total disk usage across cluster | `1073741824` |
| `total_volume_count` | Total number of volumes | `120` |
| `filer_count` | Number of filer servers | `2` |
| `broker_count` | Number of broker servers | `1` |
| `timestamp` | When data was collected | `1640995200` |

## Quick Start

### 1. Deploy Telemetry Server

```bash
# Clone and start the complete monitoring stack
git clone https://github.com/seaweedfs/seaweedfs.git
cd seaweedfs/telemetry
docker-compose up -d

# Or run the server directly
cd server
go run . -port=8080 -dashboard=true
```

### 2. Configure SeaweedFS

```bash
# Enable telemetry in SeaweedFS master (uses default telemetry.seaweedfs.com)
weed master -telemetry=true

# Or in server mode
weed server -telemetry=true

# Or specify custom telemetry server
weed master -telemetry=true -telemetry.url=http://localhost:8080/api/collect
```

### 3. Access Dashboards

- **Telemetry Server**: http://localhost:8080
- **Prometheus**: http://localhost:9090
- **Grafana**: http://localhost:3000 (admin/admin)

## Configuration

### SeaweedFS Master/Server

```bash
# Enable telemetry
-telemetry=true

# Set custom telemetry server URL (optional, defaults to telemetry.seaweedfs.com)
-telemetry.url=http://your-telemetry-server:8080/api/collect
```

### Telemetry Server

```bash
# Server configuration
-port=8080        # Server port
-dashboard=true   # Enable built-in dashboard
-cleanup=24h      # Cleanup interval
-max-age=720h     # Maximum data retention (30 days)

# Example
./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h
```

## Prometheus Metrics

The telemetry server exposes these Prometheus metrics:

### Cluster Metrics
- `seaweedfs_telemetry_total_clusters`: Total unique clusters (30 days)
- `seaweedfs_telemetry_active_clusters`: Active clusters (7 days)

### Per-Cluster Metrics
- `seaweedfs_telemetry_volume_servers{cluster_id, version, os}`: Volume servers per cluster
- `seaweedfs_telemetry_disk_bytes{cluster_id, version, os}`: Disk usage per cluster
- `seaweedfs_telemetry_volume_count{cluster_id, version, os}`: Volume count per cluster
- `seaweedfs_telemetry_filer_count{cluster_id, version, os}`: Filer servers per cluster
- `seaweedfs_telemetry_broker_count{cluster_id, version, os}`: Broker servers per cluster
- `seaweedfs_telemetry_cluster_info{cluster_id, version, os}`: Cluster metadata

### Server Metrics
- `seaweedfs_telemetry_reports_received_total`: Total telemetry reports received

## API Endpoints

### Data Collection
```bash
# Submit telemetry data (protobuf only)
POST /api/collect
Content-Type: application/x-protobuf

[TelemetryRequest protobuf data]
```

### Statistics (JSON for dashboard/debugging)
```bash
# Get aggregated statistics
GET /api/stats

# Get recent cluster instances
GET /api/instances?limit=100

# Get metrics over time
GET /api/metrics?days=30
```

### Monitoring
```bash
# Prometheus metrics
GET /metrics
```

## Docker Deployment

### Complete Stack (Recommended)

```yaml
# docker-compose.yml
version: '3.8'
services:
  telemetry-server:
    build: ./server
    ports:
      - "8080:8080"
    command: ["-port=8080", "-dashboard=true", "-cleanup=24h"]

  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
    volumes:
      - ./grafana-provisioning:/etc/grafana/provisioning
      - ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs.json
```

```bash
# Deploy the stack
docker-compose up -d

# Scale telemetry server if needed
docker-compose up -d --scale telemetry-server=3
```

### Server Only

```bash
# Build and run telemetry server
cd server
docker build -t seaweedfs-telemetry .
docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true
```

## Development

### Protocol Buffer Development

```bash
# Generate protobuf code
cd telemetry
protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto

# The generated code is already included in the repository
```

### Build from Source

```bash
# Build telemetry server
cd telemetry/server
go build -o telemetry-server .

# Build SeaweedFS with telemetry support
cd ../..
go build -o weed ./weed
```

### Testing

```bash
# Test telemetry server
cd telemetry/server
go test ./...

# Test protobuf communication (requires protobuf tools)
# See telemetry client code for examples
```

## Grafana Dashboard

The included Grafana dashboard provides:

- **Overview**: Total and active clusters, version distribution
- **Resource Usage**: Volume servers and disk usage over time
- **Infrastructure**: Operating system distribution and server counts
- **Growth Trends**: Historical growth patterns

### Custom Queries

```promql
# Total active clusters
seaweedfs_telemetry_active_clusters

# Disk usage by version
sum by (version) (seaweedfs_telemetry_disk_bytes)

# Volume servers by operating system
sum by (os) (seaweedfs_telemetry_volume_servers)

# Filer servers by version
sum by (version) (seaweedfs_telemetry_filer_count)

# Broker servers across all clusters
sum(seaweedfs_telemetry_broker_count)

# Growth rate (weekly)
increase(seaweedfs_telemetry_total_clusters[7d])
```

## Security Considerations

- **Network Security**: Use HTTPS in production environments
- **Access Control**: Implement authentication for Grafana and Prometheus
- **Data Retention**: Configure appropriate retention policies
- **Monitoring**: Monitor the telemetry infrastructure itself

## Troubleshooting

### Common Issues

**SeaweedFS not sending data:**
```bash
# Check telemetry configuration
weed master -h | grep telemetry

# Verify connectivity
curl -v http://your-telemetry-server:8080/api/collect
```

**Server not receiving data:**
```bash
# Check server logs
docker-compose logs telemetry-server

# Verify metrics endpoint
curl http://localhost:8080/metrics
```

**Prometheus not scraping:**
```bash
# Check Prometheus targets
curl http://localhost:9090/api/v1/targets

# Verify configuration
docker-compose logs prometheus
```

### Debugging

```bash
# Enable verbose logging in SeaweedFS
weed master -v=2 -telemetry=true

# Check telemetry server metrics
curl http://localhost:8080/metrics | grep seaweedfs_telemetry

# Test data flow
curl http://localhost:8080/api/stats
```

## Contributing

1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests if applicable
5. Submit a pull request

## License

This telemetry system is part of SeaweedFS and follows the same Apache 2.0 license.
telemetry/docker-compose.yml (new file, 55 lines)
@@ -0,0 +1,55 @@

version: '3.8'

services:
  telemetry-server:
    build: ./server
    ports:
      - "8080:8080"
    command: [
      "./telemetry-server",
      "-port=8080",
      "-dashboard=false",  # Disable built-in dashboard, use Grafana
      "-log=true",
      "-cors=true"
    ]
    networks:
      - telemetry

  prometheus:
    image: prom/prometheus:latest
    ports:
      - "9090:9090"
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
      - prometheus_data:/prometheus
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
    networks:
      - telemetry

  grafana:
    image: grafana/grafana:latest
    ports:
      - "3000:3000"
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - GF_USERS_ALLOW_SIGN_UP=false
    volumes:
      - grafana_data:/var/lib/grafana
      - ./grafana-dashboard.json:/var/lib/grafana/dashboards/seaweedfs-telemetry.json
      - ./grafana-provisioning:/etc/grafana/provisioning
    networks:
      - telemetry

volumes:
  prometheus_data:
  grafana_data:

networks:
  telemetry:
    driver: bridge
telemetry/grafana-dashboard.json (new file, 734 lines)
@@ -0,0 +1,734 @@

{
  "annotations": {
    "list": [
      {
        "builtIn": 1,
        "datasource": { "type": "grafana", "uid": "-- Grafana --" },
        "enable": true,
        "hide": true,
        "iconColor": "rgba(0, 211, 255, 1)",
        "name": "Annotations & Alerts",
        "type": "dashboard"
      }
    ]
  },
  "editable": true,
  "fiscalYearStartMonth": 0,
  "graphTooltip": 0,
  "id": null,
  "links": [],
  "liveNow": false,
  "panels": [
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "custom": { "align": "auto", "cellOptions": { "type": "auto" }, "inspect": false },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 0 },
      "id": 1,
      "options": { "showHeader": true },
      "pluginVersion": "10.0.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "seaweedfs_telemetry_total_clusters",
          "format": "time_series",
          "refId": "A"
        }
      ],
      "title": "Total SeaweedFS Clusters",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "thresholds" },
          "custom": { "align": "auto", "cellOptions": { "type": "auto" }, "inspect": false },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 0 },
      "id": 2,
      "options": { "showHeader": true },
      "pluginVersion": "10.0.0",
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "seaweedfs_telemetry_active_clusters",
          "format": "time_series",
          "refId": "A"
        }
      ],
      "title": "Active Clusters (7 days)",
      "type": "stat"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": { "hideFrom": { "legend": false, "tooltip": false, "vis": false } },
          "mappings": []
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 8 },
      "id": 3,
      "options": {
        "legend": { "displayMode": "visible", "placement": "bottom", "showLegend": true },
        "pieType": "pie",
        "reduceOptions": { "values": false, "calcs": ["lastNotNull"], "fields": "" },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "count by (version) (seaweedfs_telemetry_cluster_info)",
          "format": "time_series",
          "legendFormat": "{{version}}",
          "refId": "A"
        }
      ],
      "title": "SeaweedFS Version Distribution",
      "type": "piechart"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": { "hideFrom": { "legend": false, "tooltip": false, "vis": false } },
          "mappings": []
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 8 },
      "id": 4,
      "options": {
        "legend": { "displayMode": "visible", "placement": "bottom", "showLegend": true },
        "pieType": "pie",
        "reduceOptions": { "values": false, "calcs": ["lastNotNull"], "fields": "" },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "count by (os) (seaweedfs_telemetry_cluster_info)",
          "format": "time_series",
          "legendFormat": "{{os}}",
          "refId": "A"
        }
      ],
      "title": "Operating System Distribution",
      "type": "piechart"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "vis": false },
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 24, "x": 0, "y": 16 },
      "id": 5,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "sum(seaweedfs_telemetry_volume_servers)",
          "format": "time_series",
          "legendFormat": "Total Volume Servers",
          "refId": "A"
        }
      ],
      "title": "Total Volume Servers Over Time",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "vis": false },
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          },
          "unit": "bytes"
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 24 },
      "id": 6,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "sum(seaweedfs_telemetry_disk_bytes)",
          "format": "time_series",
          "legendFormat": "Total Disk Usage",
          "refId": "A"
        }
      ],
      "title": "Total Disk Usage Over Time",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "vis": false },
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 24 },
      "id": 7,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "sum(seaweedfs_telemetry_volume_count)",
          "format": "time_series",
          "legendFormat": "Total Volume Count",
          "refId": "A"
        }
      ],
      "title": "Total Volume Count Over Time",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "vis": false },
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 0, "y": 32 },
      "id": 8,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "sum(seaweedfs_telemetry_filer_count)",
          "format": "time_series",
          "legendFormat": "Total Filer Count",
          "refId": "A"
        }
      ],
      "title": "Total Filer Servers Over Time",
      "type": "timeseries"
    },
    {
      "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
      "fieldConfig": {
        "defaults": {
          "color": { "mode": "palette-classic" },
          "custom": {
            "axisLabel": "",
            "axisPlacement": "auto",
            "barAlignment": 0,
            "drawStyle": "line",
            "fillOpacity": 0,
            "gradientMode": "none",
            "hideFrom": { "legend": false, "tooltip": false, "vis": false },
            "lineInterpolation": "linear",
            "lineWidth": 1,
            "pointSize": 5,
            "scaleDistribution": { "type": "linear" },
            "showPoints": "auto",
            "spanNulls": false,
            "stacking": { "group": "A", "mode": "none" },
            "thresholdsStyle": { "mode": "off" }
          },
          "mappings": [],
          "thresholds": {
            "mode": "absolute",
            "steps": [
              { "color": "green", "value": null },
              { "color": "red", "value": 80 }
            ]
          }
        },
        "overrides": []
      },
      "gridPos": { "h": 8, "w": 12, "x": 12, "y": 32 },
      "id": 9,
      "options": {
        "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true },
        "tooltip": { "mode": "single", "sort": "none" }
      },
      "targets": [
        {
          "datasource": { "type": "prometheus", "uid": "${DS_PROMETHEUS}" },
          "expr": "sum(seaweedfs_telemetry_broker_count)",
          "format": "time_series",
          "legendFormat": "Total Broker Count",
          "refId": "A"
        }
      ],
      "title": "Total Broker Servers Over Time",
      "type": "timeseries"
    }
  ],
  "refresh": "5m",
  "schemaVersion": 38,
  "style": "dark",
  "tags": ["seaweedfs", "telemetry"],
  "templating": { "list": [] },
  "time": { "from": "now-24h", "to": "now" },
  "timepicker": {},
  "timezone": "",
  "title": "SeaweedFS Telemetry Dashboard",
  "uid": "seaweedfs-telemetry",
  "version": 1,
  "weekStart": ""
}
telemetry/grafana-provisioning/dashboards/dashboards.yml (new file, 12 lines)
@@ -0,0 +1,12 @@

apiVersion: 1

providers:
  - name: 'seaweedfs'
    orgId: 1
    folder: ''
    type: file
    disableDeletion: false
    updateIntervalSeconds: 10
    allowUiUpdates: true
    options:
      path: /var/lib/grafana/dashboards
@@ -0,0 +1,9 @@

apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    isDefault: true
    editable: true
telemetry/prometheus.yml (new file, 15 lines)
@@ -0,0 +1,15 @@

global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

scrape_configs:
  - job_name: 'seaweedfs-telemetry'
    static_configs:
      - targets: ['telemetry-server:8080']
    scrape_interval: 30s
    metrics_path: '/metrics'
    scrape_timeout: 10s
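Before deploying changes to this file, it can be validated with `promtool`, which ships with Prometheus:

```bash
promtool check config telemetry/prometheus.yml
```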
telemetry/proto/telemetry.pb.go (new file, 377 lines)
@@ -0,0 +1,377 @@

// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// 	protoc-gen-go v1.34.2
// 	protoc        v5.29.3
// source: telemetry.proto

package proto

import (
	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
	reflect "reflect"
	sync "sync"
)

const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// TelemetryData represents cluster-level telemetry information
type TelemetryData struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Unique cluster identifier (generated in-memory)
	ClusterId string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
	// SeaweedFS version
	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
	// Operating system (e.g., "linux/amd64")
	Os string `protobuf:"bytes,3,opt,name=os,proto3" json:"os,omitempty"`
	// Number of volume servers in the cluster
	VolumeServerCount int32 `protobuf:"varint,6,opt,name=volume_server_count,json=volumeServerCount,proto3" json:"volume_server_count,omitempty"`
	// Total disk usage across all volume servers (in bytes)
	TotalDiskBytes uint64 `protobuf:"varint,7,opt,name=total_disk_bytes,json=totalDiskBytes,proto3" json:"total_disk_bytes,omitempty"`
	// Total number of volumes in the cluster
	TotalVolumeCount int32 `protobuf:"varint,8,opt,name=total_volume_count,json=totalVolumeCount,proto3" json:"total_volume_count,omitempty"`
	// Number of filer servers in the cluster
	FilerCount int32 `protobuf:"varint,9,opt,name=filer_count,json=filerCount,proto3" json:"filer_count,omitempty"`
	// Number of broker servers in the cluster
	BrokerCount int32 `protobuf:"varint,10,opt,name=broker_count,json=brokerCount,proto3" json:"broker_count,omitempty"`
	// Unix timestamp when the data was collected
	Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}

func (x *TelemetryData) Reset() {
	*x = TelemetryData{}
	if protoimpl.UnsafeEnabled {
		mi := &file_telemetry_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TelemetryData) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TelemetryData) ProtoMessage() {}

func (x *TelemetryData) ProtoReflect() protoreflect.Message {
	mi := &file_telemetry_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryData.ProtoReflect.Descriptor instead.
func (*TelemetryData) Descriptor() ([]byte, []int) {
	return file_telemetry_proto_rawDescGZIP(), []int{0}
}

func (x *TelemetryData) GetClusterId() string {
	if x != nil {
		return x.ClusterId
	}
	return ""
}

func (x *TelemetryData) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

func (x *TelemetryData) GetOs() string {
	if x != nil {
		return x.Os
	}
	return ""
}

func (x *TelemetryData) GetVolumeServerCount() int32 {
	if x != nil {
		return x.VolumeServerCount
	}
	return 0
}

func (x *TelemetryData) GetTotalDiskBytes() uint64 {
	if x != nil {
		return x.TotalDiskBytes
	}
	return 0
}

func (x *TelemetryData) GetTotalVolumeCount() int32 {
	if x != nil {
		return x.TotalVolumeCount
	}
	return 0
}

func (x *TelemetryData) GetFilerCount() int32 {
	if x != nil {
		return x.FilerCount
	}
	return 0
}

func (x *TelemetryData) GetBrokerCount() int32 {
	if x != nil {
		return x.BrokerCount
	}
	return 0
}

func (x *TelemetryData) GetTimestamp() int64 {
	if x != nil {
		return x.Timestamp
	}
	return 0
}

// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server
type TelemetryRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Data *TelemetryData `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}

func (x *TelemetryRequest) Reset() {
	*x = TelemetryRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_telemetry_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TelemetryRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TelemetryRequest) ProtoMessage() {}

func (x *TelemetryRequest) ProtoReflect() protoreflect.Message {
	mi := &file_telemetry_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryRequest.ProtoReflect.Descriptor instead.
func (*TelemetryRequest) Descriptor() ([]byte, []int) {
	return file_telemetry_proto_rawDescGZIP(), []int{1}
}

func (x *TelemetryRequest) GetData() *TelemetryData {
	if x != nil {
		return x.Data
	}
	return nil
}

// TelemetryResponse is returned by the telemetry server
type TelemetryResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Success bool   `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"`
	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
}

func (x *TelemetryResponse) Reset() {
	*x = TelemetryResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_telemetry_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *TelemetryResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*TelemetryResponse) ProtoMessage() {}

func (x *TelemetryResponse) ProtoReflect() protoreflect.Message {
	mi := &file_telemetry_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TelemetryResponse.ProtoReflect.Descriptor instead.
func (*TelemetryResponse) Descriptor() ([]byte, []int) {
	return file_telemetry_proto_rawDescGZIP(), []int{2}
}

func (x *TelemetryResponse) GetSuccess() bool {
	if x != nil {
		return x.Success
	}
	return false
}

func (x *TelemetryResponse) GetMessage() string {
	if x != nil {
		return x.Message
	}
	return ""
}

var File_telemetry_proto protoreflect.FileDescriptor

var file_telemetry_proto_rawDesc = []byte{
	0x0a, 0x0f, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
	0x6f, 0x12, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x22, 0xce, 0x02, 0x0a,
	0x0d, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1d,
	0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x12, 0x18, 0x0a,
	0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
	0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x73, 0x18, 0x03, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x02, 0x6f, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
	0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06,
	0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x76, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x76,
	0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c,
	0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
	0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x44, 0x69, 0x73, 0x6b, 0x42, 0x79, 0x74, 0x65,
	0x73, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x76, 0x6f, 0x6c, 0x75, 0x6d,
	0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x10, 0x74,
	0x6f, 0x74, 0x61, 0x6c, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
	0x1f, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x09,
	0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x72, 0x43, 0x6f, 0x75, 0x6e, 0x74,
	0x12, 0x21, 0x0a, 0x0c, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
|
||||||
|
0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x43, 0x6f,
|
||||||
|
0x75, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
|
||||||
|
0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||||
|
0x70, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x40, 0x0a,
|
||||||
|
0x10, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
|
||||||
|
0x74, 0x12, 0x2c, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
|
||||||
|
0x18, 0x2e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x54, 0x65, 0x6c, 0x65,
|
||||||
|
0x6d, 0x65, 0x74, 0x72, 0x79, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22,
|
||||||
|
0x47, 0x0a, 0x11, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70,
|
||||||
|
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
|
||||||
|
0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18,
|
||||||
|
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||||
|
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x30, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68,
|
||||||
|
0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
|
||||||
|
0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x74, 0x65, 0x6c, 0x65, 0x6d,
|
||||||
|
0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||||
|
0x6f, 0x33,
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
file_telemetry_proto_rawDescOnce sync.Once
|
||||||
|
file_telemetry_proto_rawDescData = file_telemetry_proto_rawDesc
|
||||||
|
)
|
||||||
|
|
||||||
|
func file_telemetry_proto_rawDescGZIP() []byte {
|
||||||
|
file_telemetry_proto_rawDescOnce.Do(func() {
|
||||||
|
file_telemetry_proto_rawDescData = protoimpl.X.CompressGZIP(file_telemetry_proto_rawDescData)
|
||||||
|
})
|
||||||
|
return file_telemetry_proto_rawDescData
|
||||||
|
}
|
||||||
|
|
||||||
|
var file_telemetry_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
|
||||||
|
var file_telemetry_proto_goTypes = []any{
|
||||||
|
(*TelemetryData)(nil), // 0: telemetry.TelemetryData
|
||||||
|
(*TelemetryRequest)(nil), // 1: telemetry.TelemetryRequest
|
||||||
|
(*TelemetryResponse)(nil), // 2: telemetry.TelemetryResponse
|
||||||
|
}
|
||||||
|
var file_telemetry_proto_depIdxs = []int32{
|
||||||
|
0, // 0: telemetry.TelemetryRequest.data:type_name -> telemetry.TelemetryData
|
||||||
|
1, // [1:1] is the sub-list for method output_type
|
||||||
|
1, // [1:1] is the sub-list for method input_type
|
||||||
|
1, // [1:1] is the sub-list for extension type_name
|
||||||
|
1, // [1:1] is the sub-list for extension extendee
|
||||||
|
0, // [0:1] is the sub-list for field type_name
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { file_telemetry_proto_init() }
|
||||||
|
func file_telemetry_proto_init() {
|
||||||
|
if File_telemetry_proto != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !protoimpl.UnsafeEnabled {
|
||||||
|
file_telemetry_proto_msgTypes[0].Exporter = func(v any, i int) any {
|
||||||
|
switch v := v.(*TelemetryData); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_telemetry_proto_msgTypes[1].Exporter = func(v any, i int) any {
|
||||||
|
switch v := v.(*TelemetryRequest); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
file_telemetry_proto_msgTypes[2].Exporter = func(v any, i int) any {
|
||||||
|
switch v := v.(*TelemetryResponse); i {
|
||||||
|
case 0:
|
||||||
|
return &v.state
|
||||||
|
case 1:
|
||||||
|
return &v.sizeCache
|
||||||
|
case 2:
|
||||||
|
return &v.unknownFields
|
||||||
|
default:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
type x struct{}
|
||||||
|
out := protoimpl.TypeBuilder{
|
||||||
|
File: protoimpl.DescBuilder{
|
||||||
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
|
RawDescriptor: file_telemetry_proto_rawDesc,
|
||||||
|
NumEnums: 0,
|
||||||
|
NumMessages: 3,
|
||||||
|
NumExtensions: 0,
|
||||||
|
NumServices: 0,
|
||||||
|
},
|
||||||
|
GoTypes: file_telemetry_proto_goTypes,
|
||||||
|
DependencyIndexes: file_telemetry_proto_depIdxs,
|
||||||
|
MessageInfos: file_telemetry_proto_msgTypes,
|
||||||
|
}.Build()
|
||||||
|
File_telemetry_proto = out.File
|
||||||
|
file_telemetry_proto_rawDesc = nil
|
||||||
|
file_telemetry_proto_goTypes = nil
|
||||||
|
file_telemetry_proto_depIdxs = nil
|
||||||
|
}
|
52
telemetry/proto/telemetry.proto
Normal file
@@ -0,0 +1,52 @@
syntax = "proto3";

package telemetry;

option go_package = "github.com/seaweedfs/seaweedfs/telemetry/proto";

// TelemetryData represents cluster-level telemetry information
message TelemetryData {
  // Unique cluster identifier (generated in-memory)
  string cluster_id = 1;

  // SeaweedFS version
  string version = 2;

  // Operating system (e.g., "linux/amd64")
  string os = 3;

  // Field 4 reserved (was features)
  reserved 4;

  // Field 5 reserved (was deployment)
  reserved 5;

  // Number of volume servers in the cluster
  int32 volume_server_count = 6;

  // Total disk usage across all volume servers (in bytes)
  uint64 total_disk_bytes = 7;

  // Total number of volumes in the cluster
  int32 total_volume_count = 8;

  // Number of filer servers in the cluster
  int32 filer_count = 9;

  // Number of broker servers in the cluster
  int32 broker_count = 10;

  // Unix timestamp when the data was collected
  int64 timestamp = 11;
}

// TelemetryRequest is sent from SeaweedFS clusters to the telemetry server
message TelemetryRequest {
  TelemetryData data = 1;
}

// TelemetryResponse is returned by the telemetry server
message TelemetryResponse {
  bool success = 1;
  string message = 2;
}
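Editor's note: a minimal sketch (not part of this changeset) of how a client could populate and serialize the messages defined above, using the generated Go types from telemetry/proto. The field values are illustrative assumptions, not real cluster data.

```go
package main

import (
	"fmt"
	"time"

	"github.com/seaweedfs/seaweedfs/telemetry/proto"
	protobuf "google.golang.org/protobuf/proto"
)

func main() {
	// Wrap TelemetryData in TelemetryRequest, mirroring the proto schema.
	req := &proto.TelemetryRequest{
		Data: &proto.TelemetryData{
			ClusterId:         "example-cluster", // hypothetical value
			Version:           "3.45",
			Os:                "linux/amd64",
			VolumeServerCount: 3,
			Timestamp:         time.Now().Unix(),
		},
	}

	// Marshal to the compact wire format the server expects.
	payload, err := protobuf.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wire size: %d bytes\n", len(payload))
}
```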
18
telemetry/server/Dockerfile
Normal file
@@ -0,0 +1,18 @@
FROM golang:1.21-alpine AS builder

WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download

COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-extldflags "-static"' -o telemetry-server .

FROM alpine:latest
RUN apk --no-cache add ca-certificates
WORKDIR /root/

COPY --from=builder /app/telemetry-server .

EXPOSE 8080

CMD ["./telemetry-server"]
97
telemetry/server/Makefile
Normal file
@@ -0,0 +1,97 @@
.PHONY: build run clean test deps proto integration-test test-all

# Build the telemetry server
build:
	go build -o telemetry-server .

# Run the server in development mode
run:
	go run . -port=8080 -dashboard=true -cleanup=1h -max-age=24h

# Run the server in production mode
run-prod:
	./telemetry-server -port=8080 -dashboard=true -cleanup=24h -max-age=720h

# Clean build artifacts
clean:
	rm -f telemetry-server
	rm -f ../test/telemetry-server-test.log
	go clean

# Run unit tests
test:
	go test ./...

# Run integration tests
integration-test:
	@echo "🧪 Running telemetry integration tests..."
	cd ../../ && go run telemetry/test/integration.go

# Run all tests (unit + integration)
test-all: test integration-test

# Install dependencies
deps:
	go mod download
	go mod tidy

# Generate protobuf code (requires protoc)
proto:
	cd .. && protoc --go_out=. --go_opt=paths=source_relative proto/telemetry.proto

# Build Docker image
docker-build:
	docker build -t seaweedfs-telemetry .

# Run with Docker
docker-run:
	docker run -p 8080:8080 seaweedfs-telemetry -port=8080 -dashboard=true

# Development with auto-reload (requires air: go install github.com/cosmtrek/air@latest)
dev:
	air

# Check if protoc is available
check-protoc:
	@which protoc > /dev/null || (echo "protoc is required for proto generation. Install from https://grpc.io/docs/protoc-installation/" && exit 1)

# Full development setup
setup: check-protoc deps proto build

# Run a quick smoke test
smoke-test: build
	@echo "🔥 Running smoke test..."
	@timeout 10s ./telemetry-server -port=18081 > /dev/null 2>&1 & \
	SERVER_PID=$$!; \
	sleep 2; \
	if curl -s http://localhost:18081/health > /dev/null; then \
		echo "✅ Smoke test passed - server responds to health check"; \
	else \
		echo "❌ Smoke test failed - server not responding"; \
		exit 1; \
	fi; \
	kill $$SERVER_PID 2>/dev/null || true

# Continuous integration target
ci: deps proto build test integration-test
	@echo "🎉 All CI tests passed!"

# Help
help:
	@echo "Available targets:"
	@echo "  build            - Build the telemetry server binary"
	@echo "  run              - Run server in development mode"
	@echo "  run-prod         - Run server in production mode"
	@echo "  clean            - Clean build artifacts"
	@echo "  test             - Run unit tests"
	@echo "  integration-test - Run integration tests"
	@echo "  test-all         - Run all tests (unit + integration)"
	@echo "  deps             - Install Go dependencies"
	@echo "  proto            - Generate protobuf code"
	@echo "  docker-build     - Build Docker image"
	@echo "  docker-run       - Run with Docker"
	@echo "  dev              - Run with auto-reload (requires air)"
	@echo "  smoke-test       - Quick server health check"
	@echo "  setup            - Full development setup"
	@echo "  ci               - Continuous integration (all tests)"
	@echo "  help             - Show this help"
152
telemetry/server/api/handlers.go
Normal file
@@ -0,0 +1,152 @@
package api

import (
	"encoding/json"
	"io"
	"net/http"
	"strconv"
	"time"

	"github.com/seaweedfs/seaweedfs/telemetry/proto"
	"github.com/seaweedfs/seaweedfs/telemetry/server/storage"
	protobuf "google.golang.org/protobuf/proto"
)

type Handler struct {
	storage *storage.PrometheusStorage
}

func NewHandler(storage *storage.PrometheusStorage) *Handler {
	return &Handler{storage: storage}
}

func (h *Handler) CollectTelemetry(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	contentType := r.Header.Get("Content-Type")

	// Only accept protobuf content type
	if contentType != "application/x-protobuf" && contentType != "application/protobuf" {
		http.Error(w, "Content-Type must be application/x-protobuf", http.StatusUnsupportedMediaType)
		return
	}

	// Read protobuf request
	body, err := io.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "Failed to read request body", http.StatusBadRequest)
		return
	}

	req := &proto.TelemetryRequest{}
	if err := protobuf.Unmarshal(body, req); err != nil {
		http.Error(w, "Invalid protobuf data", http.StatusBadRequest)
		return
	}

	data := req.Data
	if data == nil {
		http.Error(w, "Missing telemetry data", http.StatusBadRequest)
		return
	}

	// Validate required fields
	if data.ClusterId == "" || data.Version == "" || data.Os == "" {
		http.Error(w, "Missing required fields", http.StatusBadRequest)
		return
	}

	// Set timestamp if not provided
	if data.Timestamp == 0 {
		data.Timestamp = time.Now().Unix()
	}

	// Store the telemetry data
	if err := h.storage.StoreTelemetry(data); err != nil {
		http.Error(w, "Failed to store data", http.StatusInternalServerError)
		return
	}

	// Return protobuf response
	resp := &proto.TelemetryResponse{
		Success: true,
		Message: "Telemetry data received",
	}

	respData, err := protobuf.Marshal(resp)
	if err != nil {
		http.Error(w, "Failed to marshal response", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/x-protobuf")
	w.WriteHeader(http.StatusOK)
	w.Write(respData)
}

func (h *Handler) GetStats(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	stats, err := h.storage.GetStats()
	if err != nil {
		http.Error(w, "Failed to get stats", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(stats)
}

func (h *Handler) GetInstances(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	limitStr := r.URL.Query().Get("limit")
	limit := 100 // default
	if limitStr != "" {
		if l, err := strconv.Atoi(limitStr); err == nil && l > 0 && l <= 1000 {
			limit = l
		}
	}

	instances, err := h.storage.GetInstances(limit)
	if err != nil {
		http.Error(w, "Failed to get instances", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(instances)
}

func (h *Handler) GetMetrics(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}

	daysStr := r.URL.Query().Get("days")
	days := 30 // default
	if daysStr != "" {
		if d, err := strconv.Atoi(daysStr); err == nil && d > 0 && d <= 365 {
			days = d
		}
	}

	metrics, err := h.storage.GetMetrics(days)
	if err != nil {
		http.Error(w, "Failed to get metrics", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(metrics)
}
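Editor's note: a minimal client sketch showing what CollectTelemetry above expects over the wire — a POST with an application/x-protobuf body and a protobuf response back. The localhost URL is an assumption for illustration.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"

	"github.com/seaweedfs/seaweedfs/telemetry/proto"
	protobuf "google.golang.org/protobuf/proto"
)

func main() {
	req := &proto.TelemetryRequest{Data: &proto.TelemetryData{
		ClusterId: "example", // hypothetical value
		Version:   "3.45",
		Os:        "linux/amd64",
	}}
	body, err := protobuf.Marshal(req)
	if err != nil {
		panic(err)
	}

	// The handler rejects anything but application/x-protobuf (or
	// application/protobuf), so the content type matters here.
	resp, err := http.Post("http://localhost:8080/api/collect",
		"application/x-protobuf", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	raw, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	out := &proto.TelemetryResponse{}
	if err := protobuf.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Success, out.Message)
}
```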
274
telemetry/server/dashboard/dashboard.go
Normal file
@@ -0,0 +1,274 @@
package dashboard

import (
	"net/http"
)

type Handler struct{}

func NewHandler() *Handler {
	return &Handler{}
}

func (h *Handler) ServeIndex(w http.ResponseWriter, r *http.Request) {
	html := `<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>SeaweedFS Telemetry Dashboard</title>
    <script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
    <style>
        body {
            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
            margin: 0;
            padding: 20px;
            background-color: #f5f5f5;
        }
        .container {
            max-width: 1200px;
            margin: 0 auto;
        }
        .header {
            background: white;
            padding: 20px;
            border-radius: 8px;
            margin-bottom: 20px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .stats-grid {
            display: grid;
            grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
            gap: 20px;
            margin-bottom: 20px;
        }
        .stat-card {
            background: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .stat-value {
            font-size: 2em;
            font-weight: bold;
            color: #2196F3;
        }
        .stat-label {
            color: #666;
            margin-top: 5px;
        }
        .chart-container {
            background: white;
            padding: 20px;
            border-radius: 8px;
            margin-bottom: 20px;
            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
        }
        .chart-title {
            font-size: 1.2em;
            font-weight: bold;
            margin-bottom: 15px;
        }
        .loading {
            text-align: center;
            padding: 40px;
            color: #666;
        }
        .error {
            background: #ffebee;
            color: #c62828;
            padding: 15px;
            border-radius: 4px;
            margin: 10px 0;
        }
    </style>
</head>
<body>
    <div class="container">
        <div class="header">
            <h1>SeaweedFS Telemetry Dashboard</h1>
            <p>Privacy-respecting usage analytics for SeaweedFS</p>
        </div>

        <div id="loading" class="loading">Loading telemetry data...</div>
        <div id="error" class="error" style="display: none;"></div>

        <div id="dashboard" style="display: none;">
            <div class="stats-grid">
                <div class="stat-card">
                    <div class="stat-value" id="totalInstances">-</div>
                    <div class="stat-label">Total Instances (30 days)</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="activeInstances">-</div>
                    <div class="stat-label">Active Instances (7 days)</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="totalVersions">-</div>
                    <div class="stat-label">Different Versions</div>
                </div>
                <div class="stat-card">
                    <div class="stat-value" id="totalOS">-</div>
                    <div class="stat-label">Operating Systems</div>
                </div>
            </div>

            <div class="chart-container">
                <div class="chart-title">Version Distribution</div>
                <canvas id="versionChart" width="400" height="200"></canvas>
            </div>

            <div class="chart-container">
                <div class="chart-title">Operating System Distribution</div>
                <canvas id="osChart" width="400" height="200"></canvas>
            </div>

            <div class="chart-container">
                <div class="chart-title">Volume Servers Over Time</div>
                <canvas id="serverChart" width="400" height="200"></canvas>
            </div>

            <div class="chart-container">
                <div class="chart-title">Total Disk Usage Over Time</div>
                <canvas id="diskChart" width="400" height="200"></canvas>
            </div>
        </div>
    </div>

    <script>
        let charts = {};

        async function loadDashboard() {
            try {
                // Load stats
                const statsResponse = await fetch('/api/stats');
                const stats = await statsResponse.json();

                // Load metrics
                const metricsResponse = await fetch('/api/metrics?days=30');
                const metrics = await metricsResponse.json();

                updateStats(stats);
                updateCharts(stats, metrics);

                document.getElementById('loading').style.display = 'none';
                document.getElementById('dashboard').style.display = 'block';
            } catch (error) {
                console.error('Error loading dashboard:', error);
                showError('Failed to load telemetry data: ' + error.message);
            }
        }

        function updateStats(stats) {
            document.getElementById('totalInstances').textContent = stats.total_instances || 0;
            document.getElementById('activeInstances').textContent = stats.active_instances || 0;
            document.getElementById('totalVersions').textContent = Object.keys(stats.versions || {}).length;
            document.getElementById('totalOS').textContent = Object.keys(stats.os_distribution || {}).length;
        }

        function updateCharts(stats, metrics) {
            // Version chart
            createPieChart('versionChart', 'Version Distribution', stats.versions || {});

            // OS chart
            createPieChart('osChart', 'Operating System Distribution', stats.os_distribution || {});

            // Server count over time
            if (metrics.dates && metrics.server_counts) {
                createLineChart('serverChart', 'Volume Servers', metrics.dates, metrics.server_counts, '#2196F3');
            }

            // Disk usage over time
            if (metrics.dates && metrics.disk_usage) {
                const diskUsageGB = metrics.disk_usage.map(bytes => Math.round(bytes / (1024 * 1024 * 1024)));
                createLineChart('diskChart', 'Disk Usage (GB)', metrics.dates, diskUsageGB, '#4CAF50');
            }
        }

        function createPieChart(canvasId, title, data) {
            const ctx = document.getElementById(canvasId).getContext('2d');

            if (charts[canvasId]) {
                charts[canvasId].destroy();
            }

            const labels = Object.keys(data);
            const values = Object.values(data);

            charts[canvasId] = new Chart(ctx, {
                type: 'pie',
                data: {
                    labels: labels,
                    datasets: [{
                        data: values,
                        backgroundColor: [
                            '#FF6384', '#36A2EB', '#FFCE56', '#4BC0C0',
                            '#9966FF', '#FF9F40', '#FF6384', '#C9CBCF'
                        ]
                    }]
                },
                options: {
                    responsive: true,
                    plugins: {
                        legend: {
                            position: 'bottom'
                        }
                    }
                }
            });
        }

        function createLineChart(canvasId, label, labels, data, color) {
            const ctx = document.getElementById(canvasId).getContext('2d');

            if (charts[canvasId]) {
                charts[canvasId].destroy();
            }

            charts[canvasId] = new Chart(ctx, {
                type: 'line',
                data: {
                    labels: labels,
                    datasets: [{
                        label: label,
                        data: data,
                        borderColor: color,
                        backgroundColor: color + '20',
                        fill: true,
                        tension: 0.1
                    }]
                },
                options: {
                    responsive: true,
                    scales: {
                        y: {
                            beginAtZero: true
                        }
                    }
                }
            });
        }

        function showError(message) {
            document.getElementById('loading').style.display = 'none';
            document.getElementById('error').style.display = 'block';
            document.getElementById('error').textContent = message;
        }

        // Load dashboard on page load
        loadDashboard();

        // Refresh every 5 minutes
        setInterval(loadDashboard, 5 * 60 * 1000);
    </script>
</body>
</html>`

	w.Header().Set("Content-Type", "text/html")
	w.WriteHeader(http.StatusOK)
	w.Write([]byte(html))
}
31
telemetry/server/go.sum
Normal file
@@ -0,0 +1,31 @@
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
111
telemetry/server/main.go
Normal file
@@ -0,0 +1,111 @@
package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/seaweedfs/seaweedfs/telemetry/server/api"
	"github.com/seaweedfs/seaweedfs/telemetry/server/dashboard"
	"github.com/seaweedfs/seaweedfs/telemetry/server/storage"
)

var (
	port            = flag.Int("port", 8080, "HTTP server port")
	enableCORS      = flag.Bool("cors", true, "Enable CORS for dashboard")
	logRequests     = flag.Bool("log", true, "Log incoming requests")
	enableDashboard = flag.Bool("dashboard", true, "Enable built-in dashboard (optional when using Grafana)")
	cleanupInterval = flag.Duration("cleanup", 24*time.Hour, "Cleanup interval for old instances")
	maxInstanceAge  = flag.Duration("max-age", 30*24*time.Hour, "Maximum age for instances before cleanup")
)

func main() {
	flag.Parse()

	// Create Prometheus storage instance
	store := storage.NewPrometheusStorage()

	// Start cleanup routine
	go func() {
		ticker := time.NewTicker(*cleanupInterval)
		defer ticker.Stop()
		for range ticker.C {
			store.CleanupOldInstances(*maxInstanceAge)
		}
	}()

	// Setup HTTP handlers
	mux := http.NewServeMux()

	// Prometheus metrics endpoint
	mux.Handle("/metrics", promhttp.Handler())

	// API endpoints
	apiHandler := api.NewHandler(store)
	mux.HandleFunc("/api/collect", corsMiddleware(logMiddleware(apiHandler.CollectTelemetry)))
	mux.HandleFunc("/api/stats", corsMiddleware(logMiddleware(apiHandler.GetStats)))
	mux.HandleFunc("/api/instances", corsMiddleware(logMiddleware(apiHandler.GetInstances)))
	mux.HandleFunc("/api/metrics", corsMiddleware(logMiddleware(apiHandler.GetMetrics)))

	// Dashboard (optional)
	if *enableDashboard {
		dashboardHandler := dashboard.NewHandler()
		mux.HandleFunc("/", corsMiddleware(dashboardHandler.ServeIndex))
		mux.HandleFunc("/dashboard", corsMiddleware(dashboardHandler.ServeIndex))
		mux.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))
	}

	// Health check
	mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(map[string]string{
			"status": "ok",
			"time":   time.Now().UTC().Format(time.RFC3339),
		})
	})

	addr := fmt.Sprintf(":%d", *port)
	log.Printf("Starting telemetry server on %s", addr)
	log.Printf("Prometheus metrics: http://localhost%s/metrics", addr)
	if *enableDashboard {
		log.Printf("Dashboard: http://localhost%s/dashboard", addr)
	}
	log.Printf("Cleanup interval: %v, Max instance age: %v", *cleanupInterval, *maxInstanceAge)

	if err := http.ListenAndServe(addr, mux); err != nil {
		log.Fatalf("Server failed: %v", err)
	}
}

func corsMiddleware(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if *enableCORS {
			w.Header().Set("Access-Control-Allow-Origin", "*")
			w.Header().Set("Access-Control-Allow-Methods", "GET, POST, OPTIONS")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization")
		}

		if r.Method == "OPTIONS" {
			w.WriteHeader(http.StatusOK)
			return
		}

		next(w, r)
	}
}

func logMiddleware(next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if *logRequests {
			start := time.Now()
			next(w, r)
			log.Printf("%s %s %s %v", r.Method, r.URL.Path, r.RemoteAddr, time.Since(start))
		} else {
			next(w, r)
		}
	}
}
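Editor's note: the handler registrations above nest middleware by hand, corsMiddleware(logMiddleware(handler)), so CORS headers are applied outermost. A generic sketch of the same wrapping pattern, with illustrative names that are not part of this changeset:

```go
package main

import (
	"fmt"
	"net/http"
)

type middleware func(http.HandlerFunc) http.HandlerFunc

// chain wraps h so that the first middleware listed runs outermost,
// matching the corsMiddleware(logMiddleware(handler)) nesting above.
func chain(h http.HandlerFunc, mws ...middleware) http.HandlerFunc {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

func main() {
	hello := func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "hello") }
	noop := func(next http.HandlerFunc) http.HandlerFunc { return next }
	http.HandleFunc("/", chain(hello, noop, noop))
	_ = http.ListenAndServe(":8081", nil) // hypothetical port
}
```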
235
telemetry/server/storage/prometheus.go
Normal file
@@ -0,0 +1,235 @@
package storage

import (
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/seaweedfs/seaweedfs/telemetry/proto"
)

type PrometheusStorage struct {
	// Prometheus metrics
	totalClusters     prometheus.Gauge
	activeClusters    prometheus.Gauge
	volumeServerCount *prometheus.GaugeVec
	totalDiskBytes    *prometheus.GaugeVec
	totalVolumeCount  *prometheus.GaugeVec
	filerCount        *prometheus.GaugeVec
	brokerCount       *prometheus.GaugeVec
	clusterInfo       *prometheus.GaugeVec
	telemetryReceived prometheus.Counter

	// In-memory storage for API endpoints (if needed)
	mu        sync.RWMutex
	instances map[string]*telemetryData
	stats     map[string]interface{}
}

// telemetryData is an internal struct that includes the received timestamp
type telemetryData struct {
	*proto.TelemetryData
	ReceivedAt time.Time `json:"received_at"`
}

func NewPrometheusStorage() *PrometheusStorage {
	return &PrometheusStorage{
		totalClusters: promauto.NewGauge(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_total_clusters",
			Help: "Total number of unique SeaweedFS clusters (last 30 days)",
		}),
		activeClusters: promauto.NewGauge(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_active_clusters",
			Help: "Number of active SeaweedFS clusters (last 7 days)",
		}),
		volumeServerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_volume_servers",
			Help: "Number of volume servers per cluster",
		}, []string{"cluster_id", "version", "os"}),
		totalDiskBytes: promauto.NewGaugeVec(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_disk_bytes",
			Help: "Total disk usage in bytes per cluster",
		}, []string{"cluster_id", "version", "os"}),
		totalVolumeCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_volume_count",
			Help: "Total number of volumes per cluster",
		}, []string{"cluster_id", "version", "os"}),
		filerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_filer_count",
			Help: "Number of filer servers per cluster",
		}, []string{"cluster_id", "version", "os"}),
		brokerCount: promauto.NewGaugeVec(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_broker_count",
			Help: "Number of broker servers per cluster",
		}, []string{"cluster_id", "version", "os"}),
		clusterInfo: promauto.NewGaugeVec(prometheus.GaugeOpts{
			Name: "seaweedfs_telemetry_cluster_info",
			Help: "Cluster information (always 1, labels contain metadata)",
		}, []string{"cluster_id", "version", "os"}),
		telemetryReceived: promauto.NewCounter(prometheus.CounterOpts{
			Name: "seaweedfs_telemetry_reports_received_total",
			Help: "Total number of telemetry reports received",
		}),
		instances: make(map[string]*telemetryData),
		stats:     make(map[string]interface{}),
	}
}

func (s *PrometheusStorage) StoreTelemetry(data *proto.TelemetryData) error {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Update Prometheus metrics
	labels := prometheus.Labels{
		"cluster_id": data.ClusterId,
		"version":    data.Version,
		"os":         data.Os,
	}

	s.volumeServerCount.With(labels).Set(float64(data.VolumeServerCount))
	s.totalDiskBytes.With(labels).Set(float64(data.TotalDiskBytes))
	s.totalVolumeCount.With(labels).Set(float64(data.TotalVolumeCount))
	s.filerCount.With(labels).Set(float64(data.FilerCount))
	s.brokerCount.With(labels).Set(float64(data.BrokerCount))

	infoLabels := prometheus.Labels{
		"cluster_id": data.ClusterId,
		"version":    data.Version,
		"os":         data.Os,
	}
	s.clusterInfo.With(infoLabels).Set(1)

	s.telemetryReceived.Inc()

	// Store in memory for API endpoints
	s.instances[data.ClusterId] = &telemetryData{
		TelemetryData: data,
		ReceivedAt:    time.Now().UTC(),
	}

	// Update aggregated stats
	s.updateStats()

	return nil
}

func (s *PrometheusStorage) GetStats() (map[string]interface{}, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Return cached stats
	result := make(map[string]interface{})
	for k, v := range s.stats {
		result[k] = v
	}
	return result, nil
}

func (s *PrometheusStorage) GetInstances(limit int) ([]*telemetryData, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	var instances []*telemetryData
	count := 0
	for _, instance := range s.instances {
		if count >= limit {
			break
		}
		instances = append(instances, instance)
		count++
	}

	return instances, nil
}

func (s *PrometheusStorage) GetMetrics(days int) (map[string]interface{}, error) {
	s.mu.RLock()
	defer s.mu.RUnlock()

	// Return current metrics from in-memory storage
	// Historical data should be queried from Prometheus directly
	cutoff := time.Now().AddDate(0, 0, -days)

	var volumeServers []map[string]interface{}
	var diskUsage []map[string]interface{}

	for _, instance := range s.instances {
		if instance.ReceivedAt.After(cutoff) {
			volumeServers = append(volumeServers, map[string]interface{}{
				"date":  instance.ReceivedAt.Format("2006-01-02"),
				"value": instance.TelemetryData.VolumeServerCount,
			})
			diskUsage = append(diskUsage, map[string]interface{}{
				"date":  instance.ReceivedAt.Format("2006-01-02"),
				"value": instance.TelemetryData.TotalDiskBytes,
			})
		}
	}

	return map[string]interface{}{
		"volume_servers": volumeServers,
		"disk_usage":     diskUsage,
	}, nil
}

func (s *PrometheusStorage) updateStats() {
	now := time.Now()
	last7Days := now.AddDate(0, 0, -7)
	last30Days := now.AddDate(0, 0, -30)

	totalInstances := 0
	activeInstances := 0
	versions := make(map[string]int)
	osDistribution := make(map[string]int)

	for _, instance := range s.instances {
		if instance.ReceivedAt.After(last30Days) {
			totalInstances++
		}
		if instance.ReceivedAt.After(last7Days) {
			activeInstances++
			versions[instance.TelemetryData.Version]++
			osDistribution[instance.TelemetryData.Os]++
		}
	}

	// Update Prometheus gauges
	s.totalClusters.Set(float64(totalInstances))
	s.activeClusters.Set(float64(activeInstances))

	// Update cached stats for API
	s.stats = map[string]interface{}{
		"total_instances":  totalInstances,
		"active_instances": activeInstances,
		"versions":         versions,
		"os_distribution":  osDistribution,
	}
}

// CleanupOldInstances removes instances older than the specified duration
func (s *PrometheusStorage) CleanupOldInstances(maxAge time.Duration) {
	s.mu.Lock()
	defer s.mu.Unlock()

	cutoff := time.Now().Add(-maxAge)
	for instanceID, instance := range s.instances {
		if instance.ReceivedAt.Before(cutoff) {
			delete(s.instances, instanceID)

			// Remove from Prometheus metrics
			labels := prometheus.Labels{
				"cluster_id": instance.TelemetryData.ClusterId,
				"version":    instance.TelemetryData.Version,
				"os":         instance.TelemetryData.Os,
			}
			s.volumeServerCount.Delete(labels)
			s.totalDiskBytes.Delete(labels)
			s.totalVolumeCount.Delete(labels)
			s.filerCount.Delete(labels)
			s.brokerCount.Delete(labels)
		}
	}

	s.updateStats()
}
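Editor's note: CleanupOldInstances above drops per-cluster series by calling Delete with the same label set used at write time. A self-contained sketch of that GaugeVec lifecycle, with an illustrative metric name:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// NewGaugeVec (unlike promauto) does not auto-register, so this
	// sketch needs no registry.
	g := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "example_gauge", // hypothetical metric
		Help: "illustration only",
	}, []string{"cluster_id"})

	labels := prometheus.Labels{"cluster_id": "c1"}
	g.With(labels).Set(42)

	// Delete removes the series only when the label set matches exactly,
	// which is why the cleanup code rebuilds the identical labels map.
	_ = g.Delete(labels)
}
```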
311
telemetry/test/integration.go
Normal file
311
telemetry/test/integration.go
Normal file
|
@ -0,0 +1,311 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/seaweedfs/seaweedfs/telemetry/proto"
|
||||||
|
"github.com/seaweedfs/seaweedfs/weed/telemetry"
|
||||||
|
protobuf "google.golang.org/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
serverPort = "18080" // Use different port to avoid conflicts
|
||||||
|
serverURL = "http://localhost:" + serverPort
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
fmt.Println("🧪 Starting SeaweedFS Telemetry Integration Test")
|
||||||
|
|
||||||
|
// Start telemetry server
|
||||||
|
fmt.Println("📡 Starting telemetry server...")
|
||||||
|
serverCmd, err := startTelemetryServer()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("❌ Failed to start telemetry server: %v", err)
|
||||||
|
}
|
||||||
|
defer stopServer(serverCmd)
|
||||||
|
|
||||||
|
// Wait for server to start
|
||||||
|
if !waitForServer(serverURL+"/health", 15*time.Second) {
|
||||||
|
log.Fatal("❌ Telemetry server failed to start")
|
||||||
|
}
|
||||||
|
fmt.Println("✅ Telemetry server started successfully")
|
||||||
|
|
||||||
|
// Test protobuf marshaling first
|
||||||
|
fmt.Println("🔧 Testing protobuf marshaling...")
|
||||||
|
if err := testProtobufMarshaling(); err != nil {
|
||||||
|
log.Fatalf("❌ Protobuf marshaling test failed: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Println("✅ Protobuf marshaling test passed")
|
||||||
|
|
||||||
|
// Test protobuf client
|
||||||
|
fmt.Println("🔄 Testing protobuf telemetry client...")
|
||||||
|
if err := testTelemetryClient(); err != nil {
|
||||||
|
log.Fatalf("❌ Telemetry client test failed: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Println("✅ Telemetry client test passed")
|
||||||
|
|
||||||
|
// Test server metrics endpoint
|
||||||
|
fmt.Println("📊 Testing Prometheus metrics endpoint...")
|
||||||
|
if err := testMetricsEndpoint(); err != nil {
|
||||||
|
log.Fatalf("❌ Metrics endpoint test failed: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Println("✅ Metrics endpoint test passed")
|
||||||
|
|
||||||
|
// Test stats API
|
||||||
|
fmt.Println("📈 Testing stats API...")
|
||||||
|
if err := testStatsAPI(); err != nil {
|
||||||
|
log.Fatalf("❌ Stats API test failed: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Println("✅ Stats API test passed")
|
||||||
|
|
||||||
|
// Test instances API
|
||||||
|
fmt.Println("📋 Testing instances API...")
|
||||||
|
if err := testInstancesAPI(); err != nil {
|
||||||
|
log.Fatalf("❌ Instances API test failed: %v", err)
|
||||||
|
}
|
||||||
|
fmt.Println("✅ Instances API test passed")
|
||||||
|
|
||||||
|
fmt.Println("🎉 All telemetry integration tests passed!")
|
||||||
|
}
|
||||||
|
|
||||||
|
func startTelemetryServer() (*exec.Cmd, error) {
|
||||||
|
// Get the directory where this test is running
|
||||||
|
testDir, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to get working directory: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Navigate to the server directory (from main seaweedfs directory)
|
||||||
|
serverDir := filepath.Join(testDir, "telemetry", "server")
|
||||||
|
|
||||||
|
cmd := exec.Command("go", "run", ".",
|
||||||
|
"-port="+serverPort,
|
||||||
|
"-dashboard=false",
|
||||||
|
"-cleanup=1m",
|
||||||
|
"-max-age=1h")
|
||||||
|
|
||||||
|
cmd.Dir = serverDir
|
||||||
|
|
||||||
|
// Create log files for server output
|
||||||
|
logFile, err := os.Create("telemetry-server-test.log")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to create log file: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Stdout = logFile
|
||||||
|
cmd.Stderr = logFile
|
||||||
|
|
||||||
|
if err := cmd.Start(); err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to start server: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return cmd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func stopServer(cmd *exec.Cmd) {
|
||||||
|
if cmd != nil && cmd.Process != nil {
|
||||||
|
cmd.Process.Signal(syscall.SIGTERM)
|
||||||
|
cmd.Wait()
|
||||||
|
|
||||||
|
// Clean up log file
|
||||||
|
os.Remove("telemetry-server-test.log")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForServer(url string, timeout time.Duration) bool {
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
fmt.Printf("⏳ Waiting for server at %s...\n", url)
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return false
|
||||||
|
default:
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err == nil {
|
||||||
|
resp.Body.Close()
|
||||||
|
if resp.StatusCode == http.StatusOK {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testProtobufMarshaling() error {
|
||||||
|
// Test protobuf marshaling/unmarshaling
|
||||||
|
testData := &proto.TelemetryData{
|
||||||
|
ClusterId: "test-cluster-12345",
|
||||||
|
Version: "test-3.45",
|
||||||
|
Os: "linux/amd64",
|
||||||
|
VolumeServerCount: 2,
|
||||||
|
TotalDiskBytes: 1000000,
|
||||||
|
TotalVolumeCount: 10,
|
||||||
|
FilerCount: 1,
|
||||||
|
BrokerCount: 1,
|
||||||
|
Timestamp: time.Now().Unix(),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal
|
||||||
|
data, err := protobuf.Marshal(testData)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to marshal protobuf: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Printf(" Protobuf size: %d bytes\n", len(data))
|
||||||
|
|
||||||
|
// Unmarshal
|
||||||
|
testData2 := &proto.TelemetryData{}
|
||||||
|
if err := protobuf.Unmarshal(data, testData2); err != nil {
|
||||||
|
return fmt.Errorf("failed to unmarshal protobuf: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify data
|
||||||
|
if testData2.ClusterId != testData.ClusterId {
|
||||||
|
return fmt.Errorf("protobuf data mismatch: expected %s, got %s",
|
||||||
|
testData.ClusterId, testData2.ClusterId)
|
||||||
|
}
|
||||||
|
|
||||||
|
if testData2.VolumeServerCount != testData.VolumeServerCount {
|
||||||
|
return fmt.Errorf("volume server count mismatch: expected %d, got %d",
|
||||||
|
testData.VolumeServerCount, testData2.VolumeServerCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}

func testTelemetryClient() error {
	// Create telemetry client
	client := telemetry.NewClient(serverURL+"/api/collect", true)

	// Create test data using protobuf format
	testData := &proto.TelemetryData{
		Version:           "test-3.45",
		Os:                "linux/amd64",
		VolumeServerCount: 3,
		TotalDiskBytes:    1073741824, // 1GB
		TotalVolumeCount:  50,
		FilerCount:        2,
		BrokerCount:       1,
		Timestamp:         time.Now().Unix(),
	}

	// Send telemetry data
	if err := client.SendTelemetry(testData); err != nil {
		return fmt.Errorf("failed to send telemetry: %v", err)
	}

	fmt.Printf(" Sent telemetry for cluster: %s\n", client.GetInstanceID())

	// Wait a bit for processing
	time.Sleep(2 * time.Second)

	return nil
}

func testMetricsEndpoint() error {
	resp, err := http.Get(serverURL + "/metrics")
	if err != nil {
		return fmt.Errorf("failed to get metrics: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("metrics endpoint returned status %d", resp.StatusCode)
	}

	// Read response and check for expected metrics
	content, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read metrics response: %v", err)
	}

	contentStr := string(content)
	expectedMetrics := []string{
		"seaweedfs_telemetry_total_clusters",
		"seaweedfs_telemetry_active_clusters",
		"seaweedfs_telemetry_reports_received_total",
		"seaweedfs_telemetry_volume_servers",
		"seaweedfs_telemetry_disk_bytes",
		"seaweedfs_telemetry_volume_count",
		"seaweedfs_telemetry_filer_count",
		"seaweedfs_telemetry_broker_count",
	}

	for _, metric := range expectedMetrics {
		if !strings.Contains(contentStr, metric) {
			return fmt.Errorf("missing expected metric: %s", metric)
		}
	}

	// Check that we have at least one report received
	if !strings.Contains(contentStr, "seaweedfs_telemetry_reports_received_total 1") {
		fmt.Printf(" Warning: Expected at least 1 report received, metrics content:\n%s\n", contentStr)
	}

	fmt.Printf(" Found %d expected metrics\n", len(expectedMetrics))

	return nil
}

func testStatsAPI() error {
	resp, err := http.Get(serverURL + "/api/stats")
	if err != nil {
		return fmt.Errorf("failed to get stats: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("stats API returned status %d", resp.StatusCode)
	}

	// Read and verify JSON response
	content, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read stats response: %v", err)
	}

	contentStr := string(content)
	if !strings.Contains(contentStr, "total_instances") {
		return fmt.Errorf("stats response missing total_instances field")
	}

	fmt.Printf(" Stats response: %s\n", contentStr)

	return nil
}

func testInstancesAPI() error {
	resp, err := http.Get(serverURL + "/api/instances?limit=10")
	if err != nil {
		return fmt.Errorf("failed to get instances: %v", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("instances API returned status %d", resp.StatusCode)
	}

	// Read response
	content, err := io.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("failed to read instances response: %v", err)
	}

	fmt.Printf(" Instances response length: %d bytes\n", len(content))

	return nil
}
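The four checks above are plain func() error values, so a driver can run them in order and stop at the first failure. A minimal sketch of such a runner, assuming it lives in the same package as serverURL and the test functions above (the check struct and runAllChecks name are illustrative, not part of this change; it only needs the fmt import the file already has):

	type check struct {
		name string
		fn   func() error
	}

	func runAllChecks() error {
		checks := []check{
			{"telemetry client", testTelemetryClient},
			{"metrics endpoint", testMetricsEndpoint},
			{"stats API", testStatsAPI},
			{"instances API", testInstancesAPI},
		}
		for _, c := range checks {
			fmt.Printf("Testing %s...\n", c.name)
			if err := c.fn(); err != nil {
				return fmt.Errorf("%s check failed: %v", c.name, err)
			}
		}
		return nil
	}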
@@ -1,12 +1,13 @@
 package lock_manager
 
 import (
-	"github.com/seaweedfs/seaweedfs/weed/glog"
-	"github.com/seaweedfs/seaweedfs/weed/pb"
-	"github.com/seaweedfs/seaweedfs/weed/util"
 	"sort"
 	"sync"
 	"time"
+
+	"github.com/seaweedfs/seaweedfs/weed/glog"
+	"github.com/seaweedfs/seaweedfs/weed/pb"
+	"github.com/seaweedfs/seaweedfs/weed/util"
 )
 
 type LockRingSnapshot struct {
@@ -22,6 +23,7 @@ type LockRing struct {
 	lastCompactTime  time.Time
 	snapshotInterval time.Duration
 	onTakeSnapshot   func(snapshot []pb.ServerAddress)
+	cleanupWg        sync.WaitGroup
 }
 
 func NewLockRing(snapshotInterval time.Duration) *LockRing {
@@ -87,7 +89,9 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {
 
 	r.addOneSnapshot(servers)
 
+	r.cleanupWg.Add(1)
 	go func() {
+		defer r.cleanupWg.Done()
 		<-time.After(r.snapshotInterval)
 		r.compactSnapshots()
 	}()
@@ -96,7 +100,9 @@ func (r *LockRing) SetSnapshot(servers []pb.ServerAddress) {
 func (r *LockRing) takeSnapshotWithDelayedCompaction() {
 	r.doTakeSnapshot()
 
+	r.cleanupWg.Add(1)
 	go func() {
+		defer r.cleanupWg.Done()
 		<-time.After(r.snapshotInterval)
 		r.compactSnapshots()
 	}()
@@ -172,6 +178,19 @@ func (r *LockRing) GetSnapshot() (servers []pb.ServerAddress) {
 	return r.snapshots[0].servers
 }
 
+// WaitForCleanup waits for all pending cleanup operations to complete
+// This is useful for testing to ensure deterministic behavior
+func (r *LockRing) WaitForCleanup() {
+	r.cleanupWg.Wait()
+}
+
+// GetSnapshotCount safely returns the number of snapshots for testing
+func (r *LockRing) GetSnapshotCount() int {
+	r.RLock()
+	defer r.RUnlock()
+	return len(r.snapshots)
+}
+
 func hashKeyToServer(key string, servers []pb.ServerAddress) pb.ServerAddress {
 	if len(servers) == 0 {
 		return ""
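The change above is the standard WaitGroup pattern for making background work observable: each delayed compaction registers on the WaitGroup before its goroutine starts and signals completion with a deferred Done, so WaitForCleanup can block until the ring is quiescent. A self-contained sketch of the same shape (stand-in names, not SeaweedFS code):

	package main

	import (
		"fmt"
		"sync"
		"time"
	)

	type ring struct {
		cleanupWg sync.WaitGroup
	}

	// scheduleCompaction registers the goroutine on the WaitGroup *before*
	// starting it, so a concurrent WaitForCleanup cannot miss it.
	func (r *ring) scheduleCompaction(after time.Duration) {
		r.cleanupWg.Add(1)
		go func() {
			defer r.cleanupWg.Done()
			<-time.After(after)
			fmt.Println("compacted")
		}()
	}

	// WaitForCleanup blocks until every scheduled compaction has finished.
	func (r *ring) WaitForCleanup() { r.cleanupWg.Wait() }

	func main() {
		r := &ring{}
		r.scheduleCompaction(10 * time.Millisecond)
		r.WaitForCleanup() // deterministic: returns only after the goroutine ran
	}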
@@ -1,43 +1,91 @@
 package lock_manager
 
 import (
-	"github.com/seaweedfs/seaweedfs/weed/pb"
-	"github.com/stretchr/testify/assert"
 	"testing"
 	"time"
+
+	"github.com/seaweedfs/seaweedfs/weed/pb"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestAddServer(t *testing.T) {
 	r := NewLockRing(100 * time.Millisecond)
 
+	// Add servers
 	r.AddServer("localhost:8080")
-	assert.Equal(t, 1, len(r.snapshots))
 	r.AddServer("localhost:8081")
 	r.AddServer("localhost:8082")
 	r.AddServer("localhost:8083")
 	r.AddServer("localhost:8084")
 
+	// Verify all servers are present
+	servers := r.GetSnapshot()
+	assert.Equal(t, 5, len(servers))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8080"))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8082"))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8084"))
+
+	// Remove servers
 	r.RemoveServer("localhost:8084")
 	r.RemoveServer("localhost:8082")
 	r.RemoveServer("localhost:8080")
 
-	assert.Equal(t, 8, len(r.snapshots))
+	// Wait for all cleanup operations to complete
+	r.WaitForCleanup()
+
+	// Verify only 2 servers remain (localhost:8081 and localhost:8083)
+	servers = r.GetSnapshot()
+	assert.Equal(t, 2, len(servers))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
+
+	// Verify cleanup has happened - wait for snapshot interval and check snapshots are compacted
 	time.Sleep(110 * time.Millisecond)
+	r.WaitForCleanup()
 
-	assert.Equal(t, 2, len(r.snapshots))
+	// Verify snapshot history is cleaned up properly (should have at most 2 snapshots after compaction)
+	snapshotCount := r.GetSnapshotCount()
+	assert.LessOrEqual(t, snapshotCount, 2, "Snapshot history should be compacted")
 }
 
 func TestLockRing(t *testing.T) {
 	r := NewLockRing(100 * time.Millisecond)
 
+	// Test initial snapshot
 	r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081"})
-	assert.Equal(t, 1, len(r.snapshots))
+	assert.Equal(t, 1, r.GetSnapshotCount())
+	servers := r.GetSnapshot()
+	assert.Equal(t, 2, len(servers))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8080"))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8081"))
+
+	// Add another server
 	r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082"})
-	assert.Equal(t, 2, len(r.snapshots))
+	assert.Equal(t, 2, r.GetSnapshotCount())
+	servers = r.GetSnapshot()
+	assert.Equal(t, 3, len(servers))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8082"))
+
+	// Wait for cleanup interval and add another server
 	time.Sleep(110 * time.Millisecond)
+	r.WaitForCleanup()
 	r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083"})
-	assert.Equal(t, 3, len(r.snapshots))
+	assert.LessOrEqual(t, r.GetSnapshotCount(), 3)
+	servers = r.GetSnapshot()
+	assert.Equal(t, 4, len(servers))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8083"))
+
+	// Wait for cleanup and verify compaction
 	time.Sleep(110 * time.Millisecond)
-	assert.Equal(t, 2, len(r.snapshots))
+	r.WaitForCleanup()
+	assert.LessOrEqual(t, r.GetSnapshotCount(), 2, "Snapshots should be compacted")
+
+	// Add final server
 	r.SetSnapshot([]pb.ServerAddress{"localhost:8080", "localhost:8081", "localhost:8082", "localhost:8083", "localhost:8084"})
-	assert.Equal(t, 3, len(r.snapshots))
+	servers = r.GetSnapshot()
+	assert.Equal(t, 5, len(servers))
+	assert.Contains(t, servers, pb.ServerAddress("localhost:8084"))
+	assert.LessOrEqual(t, r.GetSnapshotCount(), 3)
 }
@@ -115,7 +115,10 @@ func runBackup(cmd *Command, args []string) bool {
 			return true
 		}
 	}
-	v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
+
+	ver := needle.Version(stats.Version)
+
+	v, err := storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0)
 	if err != nil {
 		fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 		return true
@@ -142,7 +145,7 @@ func runBackup(cmd *Command, args []string) bool {
 			fmt.Printf("Error destroying volume: %v\n", err)
 		}
 		// recreate an empty volume
-		v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, 0, 0)
+		v, err = storage.NewVolume(util.ResolvePath(*s.dir), util.ResolvePath(*s.dir), *s.collection, vid, storage.NeedleMapInMemory, replication, ttl, 0, ver, 0, 0)
 		if err != nil {
 			fmt.Printf("Error creating or reading from volume %d: %v\n", vid, err)
 			return true
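Both call sites now thread a needle version into storage.NewVolume; in the backup path it comes from the remote volume's stats, so the local copy is written in the same on-disk format as the source. A hedged, self-contained sketch of that selection logic (the stand-in types are illustrative, and the zero-value fallback is an assumption, not code from this diff):

	package main

	import "fmt"

	// stand-ins for needle.Version and the volume stats from the master
	type Version int

	const CurrentVersion Version = 3

	type VolumeStats struct{ Version int }

	// versionFor mirrors the remote volume's format when it is known,
	// otherwise falls back to the current default (assumption).
	func versionFor(stats *VolumeStats) Version {
		if stats != nil && stats.Version != 0 {
			return Version(stats.Version)
		}
		return CurrentVersion
	}

	func main() {
		fmt.Println(versionFor(&VolumeStats{Version: 2})) // 2
		fmt.Println(versionFor(nil))                      // 3
	}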
@@ -41,7 +41,7 @@ func runCompact(cmd *Command, args []string) bool {
 	preallocate := *compactVolumePreallocate * (1 << 20)
 
 	vid := needle.VolumeId(*compactVolumeId)
-	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, 0, 0)
+	v, err := storage.NewVolume(util.ResolvePath(*compactVolumePath), util.ResolvePath(*compactVolumePath), *compactVolumeCollection, vid, storage.NeedleMapInMemory, nil, nil, preallocate, needle.GetCurrentVersion(), 0, 0)
 	if err != nil {
 		glog.Fatalf("Load Volume [ERROR] %s\n", err)
 	}
@@ -193,6 +193,13 @@ func runFuse(cmd *Command, args []string) bool {
 			} else {
 				panic(fmt.Errorf("readOnly: %s", err))
 			}
+		case "disableXAttr":
+			if parsed, err := strconv.ParseBool(parameter.value); err == nil {
+
+				mountOptions.disableXAttr = &parsed
+			} else {
+				panic(fmt.Errorf("disableXAttr: %s", err))
+			}
 		case "cpuprofile":
 			mountCpuProfile = &parameter.value
 		case "memprofile":
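The new disableXAttr case follows the same parse-or-panic shape as the readOnly option above it: strconv.ParseBool accepts 1/t/T/TRUE/true/True and 0/f/F/FALSE/false/False and errors on anything else. A tiny standalone illustration (the input values are made up):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		for _, raw := range []string{"true", "0", "not-a-bool"} {
			if parsed, err := strconv.ParseBool(raw); err == nil {
				fmt.Printf("%q -> %v\n", raw, parsed)
			} else {
				fmt.Printf("%q -> rejected: %v\n", raw, err) // the mount code panics here
			}
		}
	}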
@@ -3,13 +3,14 @@ package command
 import (
 	"context"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/util/version"
 	"net/http"
 	"os"
 	"path"
 	"strings"
 	"time"
 
+	"github.com/seaweedfs/seaweedfs/weed/util/version"
+
 	hashicorpRaft "github.com/hashicorp/raft"
 
 	"slices"
@@ -60,6 +61,8 @@ type MasterOptions struct {
 	electionTimeout  *time.Duration
 	raftHashicorp    *bool
 	raftBootstrap    *bool
+	telemetryUrl     *string
+	telemetryEnabled *bool
 }
 
 func init() {
@@ -87,6 +90,8 @@ func init() {
 	m.electionTimeout = cmdMaster.Flag.Duration("electionTimeout", 10*time.Second, "election timeout of master servers")
 	m.raftHashicorp = cmdMaster.Flag.Bool("raftHashicorp", false, "use hashicorp raft")
 	m.raftBootstrap = cmdMaster.Flag.Bool("raftBootstrap", false, "Whether to bootstrap the Raft cluster")
+	m.telemetryUrl = cmdMaster.Flag.String("telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics")
+	m.telemetryEnabled = cmdMaster.Flag.Bool("telemetry", false, "enable telemetry reporting")
 }
 
 var cmdMaster = &Command{
@@ -111,6 +116,11 @@ func runMaster(cmd *Command, args []string) bool {
 	util.LoadSecurityConfiguration()
 	util.LoadConfiguration("master", false)
 
+	// bind viper configuration to command line flags
+	if v := util.GetViper().GetString("master.mdir"); v != "" {
+		*m.metaFolder = v
+	}
+
 	grace.SetupProfiling(*masterCpuProfile, *masterMemProfile)
 
 	parent, _ := util.FullPath(*m.metaFolder).DirAndName()
@@ -326,5 +336,7 @@ func (m *MasterOptions) toMasterOption(whiteList []string) *weed_server.MasterOp
 		DisableHttp:        *m.disableHttp,
 		MetricsAddress:     *m.metricsAddress,
 		MetricsIntervalSec: *m.metricsIntervalSec,
+		TelemetryUrl:       *m.telemetryUrl,
+		TelemetryEnabled:   *m.telemetryEnabled,
 	}
 }
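The runMaster change above lets a value from the TOML configuration file (master.mdir) override the -mdir flag default before the server starts. A self-contained sketch of that precedence using the standard spf13/viper API (the flag name and key come from this diff; the surrounding program is illustrative):

	package main

	import (
		"flag"
		"fmt"

		"github.com/spf13/viper"
	)

	func main() {
		metaFolder := flag.String("mdir", ".", "data directory to store meta data")
		flag.Parse()

		// load an optional master.toml from the working directory
		viper.SetConfigName("master")
		viper.AddConfigPath(".")
		_ = viper.ReadInConfig() // missing file is fine; the flag default stays

		// config file value wins over the flag default when present
		if v := viper.GetString("master.mdir"); v != "" {
			*metaFolder = v
		}
		fmt.Println("meta folder:", *metaFolder)
	}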
@@ -104,6 +104,8 @@ func init() {
 	masterOptions.raftBootstrap = cmdServer.Flag.Bool("master.raftBootstrap", false, "Whether to bootstrap the Raft cluster")
 	masterOptions.heartbeatInterval = cmdServer.Flag.Duration("master.heartbeatInterval", 300*time.Millisecond, "heartbeat interval of master servers, and will be randomly multiplied by [1, 1.25)")
 	masterOptions.electionTimeout = cmdServer.Flag.Duration("master.electionTimeout", 10*time.Second, "election timeout of master servers")
+	masterOptions.telemetryUrl = cmdServer.Flag.String("master.telemetry.url", "https://telemetry.seaweedfs.com/api/collect", "telemetry server URL to send usage statistics")
+	masterOptions.telemetryEnabled = cmdServer.Flag.Bool("master.telemetry", false, "enable telemetry reporting")
 
 	filerOptions.filerGroup = cmdServer.Flag.String("filer.filerGroup", "", "share metadata with other filers in the same filerGroup")
 	filerOptions.collection = cmdServer.Flag.String("filer.collection", "", "all data will be stored in this collection")
@@ -19,5 +19,8 @@ func runVersion(cmd *Command, args []string) bool {
 	}
 
 	fmt.Printf("version %s %s %s\n", version.Version(), runtime.GOOS, runtime.GOARCH)
+	println()
+	println("For enterprise users, please visit https://seaweedfs.com for SeaweedFS Enterprise Edition,")
+	println("which has a self-healing storage format with better data protection.")
 	return true
 }
@@ -169,7 +169,7 @@ func (store *AbstractSqlStore) InsertEntry(ctx context.Context, entry *filer.Ent
 	if err != nil && strings.Contains(strings.ToLower(err.Error()), "duplicate entry") {
 		// now the insert failed possibly due to duplication constraints
 		sqlInsert = "falls back to update"
-		glog.V(1).Infof("insert %s %s: %v", entry.FullPath, sqlInsert, err)
+		glog.V(1).InfofCtx(ctx, "insert %s %s: %v", entry.FullPath, sqlInsert, err)
 		res, err = db.ExecContext(ctx, store.GetSqlUpdate(bucket), meta, util.HashStringToLong(dir), name, dir)
 	}
 	if err != nil {
@@ -277,7 +277,7 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
 		}
 	}
 
-	glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
+	glog.V(4).InfofCtx(ctx, "delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
 	res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
 	if err != nil {
 		return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
@@ -312,7 +312,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
 		var name string
 		var data []byte
 		if err = rows.Scan(&name, &data); err != nil {
-			glog.V(0).Infof("scan %s : %v", dirPath, err)
+			glog.V(0).InfofCtx(ctx, "scan %s : %v", dirPath, err)
 			return lastFileName, fmt.Errorf("scan %s: %v", dirPath, err)
 		}
 		lastFileName = name
@@ -321,7 +321,7 @@ func (store *AbstractSqlStore) ListDirectoryPrefixedEntries(ctx context.Context,
 			FullPath: util.NewFullPath(string(dirPath), name),
 		}
 		if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
-			glog.V(0).Infof("scan decode %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "scan decode %s : %v", entry.FullPath, err)
 			return lastFileName, fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
 		}

@@ -31,7 +31,7 @@ func (store *AbstractSqlStore) KvPut(ctx context.Context, key []byte, value []by
 	}
 
 	// now the insert failed possibly due to duplication constraints
-	glog.V(1).Infof("kv insert falls back to update: %s", err)
+	glog.V(1).InfofCtx(ctx, "kv insert falls back to update: %s", err)
 
 	res, err = db.ExecContext(ctx, store.GetSqlUpdate(DEFAULT_TABLE), value, dirHash, name, dirStr)
 	if err != nil {
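From here on, this commit series mechanically converts store logging from glog.Infof/Errorf to the context-aware InfofCtx/ErrorfCtx variants, so each log line can carry request-scoped data from the ctx the filer already threads through every store call. The glog method names are from this diff; the wrapper below is a simplified, self-contained stand-in showing the idea:

	package main

	import (
		"context"
		"log"
	)

	type ctxKey string

	// ErrorfCtx is a simplified stand-in for a context-aware logger: it pulls
	// request-scoped data off the ctx and prefixes every line with it.
	func ErrorfCtx(ctx context.Context, format string, args ...interface{}) {
		if id, ok := ctx.Value(ctxKey("requestID")).(string); ok {
			format = "[req " + id + "] " + format
		}
		log.Printf("ERROR "+format, args...)
	}

	func main() {
		ctx := context.WithValue(context.Background(), ctxKey("requestID"), "abc123")
		ErrorfCtx(ctx, "find %s: %v", "/some/path", "not found")
	}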
@@ -233,7 +233,7 @@ func (store *ArangodbStore) FindEntry(ctx context.Context, fullpath util.FullPat
 		if driver.IsNotFound(err) {
 			return nil, filer_pb.ErrNotFound
 		}
-		glog.Errorf("find %s: %v", fullpath, err)
+		glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
 		return nil, filer_pb.ErrNotFound
 	}
 	if len(data.Meta) == 0 {
@@ -257,7 +257,7 @@ func (store *ArangodbStore) DeleteEntry(ctx context.Context, fullpath util.FullP
 	}
 	_, err = targetCollection.RemoveDocument(ctx, hashString(string(fullpath)))
 	if err != nil && !driver.IsNotFound(err) {
-		glog.Errorf("find %s: %v", fullpath, err)
+		glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
 		return fmt.Errorf("delete %s : %v", fullpath, err)
 	}
 	return nil
@@ -331,7 +331,7 @@ sort d.name asc
 		converted := arrayToBytes(data.Meta)
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(converted)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}

@@ -38,7 +38,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
 		return nil, filer.ErrKvNotFound
 	}
 	if err != nil {
-		glog.Errorf("kv get: %s %v", string(key), err)
+		glog.ErrorfCtx(ctx, "kv get: %s %v", string(key), err)
 		return nil, filer.ErrKvNotFound
 	}
 	return arrayToBytes(model.Meta), nil
@@ -47,7 +47,7 @@ func (store *ArangodbStore) KvGet(ctx context.Context, key []byte) (value []byte
 func (store *ArangodbStore) KvDelete(ctx context.Context, key []byte) (err error) {
 	_, err = store.kvCollection.RemoveDocument(ctx, hashString(".kvstore."+string(key)))
 	if err != nil {
-		glog.Errorf("kv del: %v", err)
+		glog.ErrorfCtx(ctx, "kv del: %v", err)
 		return filer.ErrKvNotFound
 	}
 	return nil
@@ -4,9 +4,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/gocql/gocql"
 	"time"
 
+	"github.com/gocql/gocql"
+
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -202,7 +203,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
 		lastFileName = name
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -210,7 +211,7 @@ func (store *CassandraStore) ListDirectoryEntries(ctx context.Context, dirPath u
 		}
 	}
 	if err = iter.Close(); err != nil {
-		glog.V(0).Infof("list iterator close: %v", err)
+		glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
 	}
 
 	return lastFileName, err

@@ -4,9 +4,10 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"github.com/gocql/gocql"
 	"time"
 
+	"github.com/gocql/gocql"
+
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
@@ -202,7 +203,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
 		lastFileName = name
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -210,7 +211,7 @@ func (store *Cassandra2Store) ListDirectoryEntries(ctx context.Context, dirPath
 		}
 	}
 	if err = iter.Close(); err != nil {
-		glog.V(0).Infof("list iterator close: %v", err)
+		glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
 	}
 
 	return lastFileName, err
@@ -113,7 +113,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 	}
 	value, err := jsoniter.Marshal(esEntry)
 	if err != nil {
-		glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+		glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
 		return fmt.Errorf("insert entry marshal %v", err)
 	}
 	_, err = store.client.Index().
@@ -123,7 +123,7 @@ func (store *ElasticStore) InsertEntry(ctx context.Context, entry *filer.Entry)
 		BodyJson(string(value)).
 		Do(ctx)
 	if err != nil {
-		glog.Errorf("insert entry(%s) %v.", string(entry.FullPath), err)
+		glog.ErrorfCtx(ctx, "insert entry(%s) %v.", string(entry.FullPath), err)
 		return fmt.Errorf("insert entry %v", err)
 	}
 	return nil
@@ -152,7 +152,7 @@ func (store *ElasticStore) FindEntry(ctx context.Context, fullpath weed_util.Ful
 		err := jsoniter.Unmarshal(searchResult.Source, esEntry)
 		return esEntry.Entry, err
 	}
-	glog.Errorf("find entry(%s),%v.", string(fullpath), err)
+	glog.ErrorfCtx(ctx, "find entry(%s),%v.", string(fullpath), err)
 	return nil, filer_pb.ErrNotFound
 }

@@ -178,7 +178,7 @@ func (store *ElasticStore) deleteIndex(ctx context.Context, index string) (err e
 	if elastic.IsNotFound(err) || (err == nil && deleteResult.Acknowledged) {
 		return nil
 	}
-	glog.Errorf("delete index(%s) %v.", index, err)
+	glog.ErrorfCtx(ctx, "delete index(%s) %v.", index, err)
 	return err
 }

@@ -193,14 +193,14 @@ func (store *ElasticStore) deleteEntry(ctx context.Context, index, id string) (e
 			return nil
 		}
 	}
-	glog.Errorf("delete entry(index:%s,_id:%s) %v.", index, id, err)
+	glog.ErrorfCtx(ctx, "delete entry(index:%s,_id:%s) %v.", index, id, err)
 	return fmt.Errorf("delete entry %v", err)
 }
 
 func (store *ElasticStore) DeleteFolderChildren(ctx context.Context, fullpath weed_util.FullPath) (err error) {
 	_, err = store.ListDirectoryEntries(ctx, fullpath, "", false, math.MaxInt32, func(entry *filer.Entry) bool {
 		if err := store.DeleteEntry(ctx, entry.FullPath); err != nil {
-			glog.Errorf("elastic delete %s: %v.", entry.FullPath, err)
+			glog.ErrorfCtx(ctx, "elastic delete %s: %v.", entry.FullPath, err)
 			return false
 		}
 		return true
@@ -228,7 +228,7 @@ func (store *ElasticStore) listDirectoryEntries(
 	result := &elastic.SearchResult{}
 	if (startFileName == "" && first) || inclusive {
 		if result, err = store.search(ctx, index, parentId); err != nil {
-			glog.Errorf("search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+			glog.ErrorfCtx(ctx, "search (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
 			return
 		}
 	} else {
@@ -238,7 +238,7 @@ func (store *ElasticStore) listDirectoryEntries(
 		}
 		after := weed_util.Md5String([]byte(fullPath))
 		if result, err = store.searchAfter(ctx, index, parentId, after); err != nil {
-			glog.Errorf("searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
+			glog.ErrorfCtx(ctx, "searchAfter (%s,%s,%t,%d) %v.", string(fullpath), startFileName, inclusive, limit, err)
 			return
 		}
 	}
@@ -25,7 +25,7 @@ func (store *ElasticStore) KvDelete(ctx context.Context, key []byte) (err error)
 			return nil
 		}
 	}
-	glog.Errorf("delete key(id:%s) %v.", string(key), err)
+	glog.ErrorfCtx(ctx, "delete key(id:%s) %v.", string(key), err)
 	return fmt.Errorf("delete key %v", err)
 }

@@ -44,7 +44,7 @@ func (store *ElasticStore) KvGet(ctx context.Context, key []byte) (value []byte,
 			return esEntry.Value, nil
 		}
 	}
-	glog.Errorf("find key(%s),%v.", string(key), err)
+	glog.ErrorfCtx(ctx, "find key(%s),%v.", string(key), err)
 	return value, filer.ErrKvNotFound
 }

@@ -52,7 +52,7 @@ func (store *ElasticStore) KvPut(ctx context.Context, key []byte, value []byte)
 	esEntry := &ESKVEntry{value}
 	val, err := jsoniter.Marshal(esEntry)
 	if err != nil {
-		glog.Errorf("insert key(%s) %v.", string(key), err)
+		glog.ErrorfCtx(ctx, "insert key(%s) %v.", string(key), err)
 		return fmt.Errorf("insert key %v", err)
 	}
 	_, err = store.client.Index().
@@ -4,10 +4,11 @@ import (
 	"context"
 	"crypto/tls"
 	"fmt"
-	"go.etcd.io/etcd/client/pkg/v3/transport"
 	"strings"
 	"time"
 
+	"go.etcd.io/etcd/client/pkg/v3/transport"
+
 	"go.etcd.io/etcd/client/v3"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
@@ -95,7 +96,7 @@ func (store *EtcdStore) initialize(servers, username, password string, timeout t
 		return fmt.Errorf("error checking etcd connection: %s", err)
 	}
 
-	glog.V(0).Infof("сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
+	glog.V(0).InfofCtx(ctx, "сonnection to etcd has been successfully verified. etcd version: %s", resp.Version)
 	store.client = client
 
 	return nil
@@ -208,7 +209,7 @@ func (store *EtcdStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(kv.Value)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -106,7 +106,7 @@ func ResolveOneChunkManifest(ctx context.Context, lookupFileIdFn wdclient.Lookup
 func fetchWholeChunk(ctx context.Context, bytesBuffer *bytes.Buffer, lookupFileIdFn wdclient.LookupFileIdFunctionType, fileId string, cipherKey []byte, isGzipped bool) error {
 	urlStrings, err := lookupFileIdFn(ctx, fileId)
 	if err != nil {
-		glog.Errorf("operation LookupFileId %s failed, err: %v", fileId, err)
+		glog.ErrorfCtx(ctx, "operation LookupFileId %s failed, err: %v", fileId, err)
 		return err
 	}
 	err = retriedStreamFetchChunkData(ctx, bytesBuffer, urlStrings, "", cipherKey, isGzipped, true, 0, 0)
@@ -159,7 +159,7 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin
 				break
 			}
 			if err != nil {
-				glog.V(0).Infof("read %s failed, err: %v", urlString, err)
+				glog.V(0).InfofCtx(ctx, "read %s failed, err: %v", urlString, err)
 			} else {
 				break
 			}
@@ -169,7 +169,7 @@ func retriedStreamFetchChunkData(ctx context.Context, writer io.Writer, urlStrin
 			break
 		}
 		if err != nil && shouldRetry {
-			glog.V(0).Infof("retry reading in %v", waitTime)
+			glog.V(0).InfofCtx(ctx, "retry reading in %v", waitTime)
 			time.Sleep(waitTime)
 		} else {
 			break
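retriedStreamFetchChunkData tries each replica URL in turn and, when the failure is retryable, sleeps before the next round. A self-contained sketch of that try-each-replica-then-back-off shape (the stub fetchOnce and the doubling backoff are illustrative; the real function streams into an io.Writer and derives its wait time differently):

	package main

	import (
		"errors"
		"fmt"
		"time"
	)

	// fetchOnce stands in for one streaming read from a replica.
	func fetchOnce(url string) error { return errors.New("transient read error") }

	func main() {
		urls := []string{"http://replica-a:8080", "http://replica-b:8080"}
		waitTime := 100 * time.Millisecond
		for attempt := 0; attempt < 3; attempt++ {
			for _, u := range urls {
				err := fetchOnce(u)
				if err == nil {
					fmt.Println("fetched from", u)
					return
				}
				fmt.Printf("read %s failed, err: %v\n", u, err)
			}
			fmt.Println("retry reading in", waitTime)
			time.Sleep(waitTime)
			waitTime *= 2 // widen the wait between rounds (illustrative)
		}
	}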
@@ -220,19 +220,19 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 			}
 		}
 
-		glog.V(4).Infof("InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
+		glog.V(4).InfofCtx(ctx, "InsertEntry %s: new entry: %v", entry.FullPath, entry.Name())
 		if err := f.Store.InsertEntry(ctx, entry); err != nil {
-			glog.Errorf("insert entry %s: %v", entry.FullPath, err)
+			glog.ErrorfCtx(ctx, "insert entry %s: %v", entry.FullPath, err)
 			return fmt.Errorf("insert entry %s: %v", entry.FullPath, err)
 		}
 	} else {
 		if o_excl {
-			glog.V(3).Infof("EEXIST: entry %s already exists", entry.FullPath)
+			glog.V(3).InfofCtx(ctx, "EEXIST: entry %s already exists", entry.FullPath)
 			return fmt.Errorf("EEXIST: entry %s already exists", entry.FullPath)
 		}
-		glog.V(4).Infof("UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
+		glog.V(4).InfofCtx(ctx, "UpdateEntry %s: old entry: %v", entry.FullPath, oldEntry.Name())
 		if err := f.UpdateEntry(ctx, oldEntry, entry); err != nil {
-			glog.Errorf("update entry %s: %v", entry.FullPath, err)
+			glog.ErrorfCtx(ctx, "update entry %s: %v", entry.FullPath, err)
 			return fmt.Errorf("update entry %s: %v", entry.FullPath, err)
 		}
 	}
@@ -241,7 +241,7 @@ func (f *Filer) CreateEntry(ctx context.Context, entry *Entry, o_excl bool, isFr
 
 	f.deleteChunksIfNotNew(ctx, oldEntry, entry)
 
-	glog.V(4).Infof("CreateEntry %s: created", entry.FullPath)
+	glog.V(4).InfofCtx(ctx, "CreateEntry %s: created", entry.FullPath)
 
 	return nil
 }
@@ -256,7 +256,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
 	// fmt.Printf("%d dirPath: %+v\n", level, dirPath)
 
 	// check the store directly
-	glog.V(4).Infof("find uncached directory: %s", dirPath)
+	glog.V(4).InfofCtx(ctx, "find uncached directory: %s", dirPath)
 	dirEntry, _ := f.FindEntry(ctx, util.FullPath(dirPath))
 
 	// no such existing directory
@@ -291,11 +291,11 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
 			},
 		}
 
-		glog.V(2).Infof("create directory: %s %v", dirPath, dirEntry.Mode)
+		glog.V(2).InfofCtx(ctx, "create directory: %s %v", dirPath, dirEntry.Mode)
 		mkdirErr := f.Store.InsertEntry(ctx, dirEntry)
 		if mkdirErr != nil {
 			if fEntry, err := f.FindEntry(ctx, util.FullPath(dirPath)); err == filer_pb.ErrNotFound || fEntry == nil {
-				glog.V(3).Infof("mkdir %s: %v", dirPath, mkdirErr)
+				glog.V(3).InfofCtx(ctx, "mkdir %s: %v", dirPath, mkdirErr)
 				return fmt.Errorf("mkdir %s: %v", dirPath, mkdirErr)
 			}
 		} else {
@@ -305,7 +305,7 @@ func (f *Filer) ensureParentDirectoryEntry(ctx context.Context, entry *Entry, di
 		}
 
 	} else if !dirEntry.IsDirectory() {
-		glog.Errorf("CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
+		glog.ErrorfCtx(ctx, "CreateEntry %s: %s should be a directory", entry.FullPath, dirPath)
 		return fmt.Errorf("%s is a file", dirPath)
 	}
 
@@ -316,11 +316,11 @@ func (f *Filer) UpdateEntry(ctx context.Context, oldEntry, entry *Entry) (err er
 	if oldEntry != nil {
 		entry.Attr.Crtime = oldEntry.Attr.Crtime
 		if oldEntry.IsDirectory() && !entry.IsDirectory() {
-			glog.Errorf("existing %s is a directory", oldEntry.FullPath)
+			glog.ErrorfCtx(ctx, "existing %s is a directory", oldEntry.FullPath)
 			return fmt.Errorf("existing %s is a directory", oldEntry.FullPath)
 		}
 		if !oldEntry.IsDirectory() && entry.IsDirectory() {
-			glog.Errorf("existing %s is a file", oldEntry.FullPath)
+			glog.ErrorfCtx(ctx, "existing %s is a file", oldEntry.FullPath)
 			return fmt.Errorf("existing %s is a file", oldEntry.FullPath)
 		}
 	}
@@ -41,7 +41,7 @@ func (f *Filer) DeleteEntryMetaAndData(ctx context.Context, p util.FullPath, isR
 			return nil
 		})
 		if err != nil {
-			glog.V(2).Infof("delete directory %s: %v", p, err)
+			glog.V(2).InfofCtx(ctx, "delete directory %s: %v", p, err)
 			return fmt.Errorf("delete directory %s: %v", p, err)
 		}
 	}
@@ -74,12 +74,12 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 	for {
 		entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
 		if err != nil {
-			glog.Errorf("list folder %s: %v", entry.FullPath, err)
+			glog.ErrorfCtx(ctx, "list folder %s: %v", entry.FullPath, err)
 			return fmt.Errorf("list folder %s: %v", entry.FullPath, err)
 		}
 		if lastFileName == "" && !isRecursive && len(entries) > 0 {
 			// only for first iteration in the loop
-			glog.V(2).Infof("deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
+			glog.V(2).InfofCtx(ctx, "deleting a folder %s has children: %+v ...", entry.FullPath, entries[0].Name())
 			return fmt.Errorf("%s: %s", MsgFailDelNonEmptyFolder, entry.FullPath)
 		}
 
@@ -110,7 +110,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 		}
 	}
 
-	glog.V(3).Infof("deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+	glog.V(3).InfofCtx(ctx, "deleting directory %v delete chunks: %v", entry.FullPath, shouldDeleteChunks)
 
 	if storeDeletionErr := f.Store.DeleteFolderChildren(ctx, entry.FullPath); storeDeletionErr != nil {
 		return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -124,7 +124,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 
 func (f *Filer) doDeleteEntryMetaAndData(ctx context.Context, entry *Entry, shouldDeleteChunks bool, isFromOtherCluster bool, signatures []int32) (err error) {
 
-	glog.V(3).Infof("deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
+	glog.V(3).InfofCtx(ctx, "deleting entry %v, delete chunks: %v", entry.FullPath, shouldDeleteChunks)
 
 	if storeDeletionErr := f.Store.DeleteOneEntry(ctx, entry); storeDeletionErr != nil {
 		return fmt.Errorf("filer store delete: %v", storeDeletionErr)
@@ -153,7 +153,7 @@ func (f *Filer) DoDeleteCollection(collectionName string) (err error) {
 func (f *Filer) maybeDeleteHardLinks(ctx context.Context, hardLinkIds []HardLinkId) {
 	for _, hardLinkId := range hardLinkIds {
 		if err := f.Store.DeleteHardLink(ctx, hardLinkId); err != nil {
-			glog.Errorf("delete hard link id %d : %v", hardLinkId, err)
+			glog.ErrorfCtx(ctx, "delete hard link id %d : %v", hardLinkId, err)
 		}
 	}
 }
@@ -93,7 +93,7 @@ func (f *Filer) doDeleteChunks(ctx context.Context, chunks []*filer_pb.FileChunk
 		}
 		dataChunks, manifestResolveErr := ResolveOneChunkManifest(ctx, f.MasterClient.LookupFileId, chunk)
 		if manifestResolveErr != nil {
-			glog.V(0).Infof("failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
+			glog.V(0).InfofCtx(ctx, "failed to resolve manifest %s: %v", chunk.FileId, manifestResolveErr)
 		}
 		for _, dChunk := range dataChunks {
 			f.fileIdDeletionQueue.EnQueue(dChunk.GetFileIdString())
@@ -119,7 +119,7 @@ func (f *Filer) deleteChunksIfNotNew(ctx context.Context, oldEntry, newEntry *En
 
 	toDelete, err := MinusChunks(ctx, f.MasterClient.GetLookupFileIdFunction(), oldChunks, newChunks)
 	if err != nil {
-		glog.Errorf("Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
+		glog.ErrorfCtx(ctx, "Failed to resolve old entry chunks when delete old entry chunks. new: %s, old: %s", newChunks, oldChunks)
 		return
 	}
 	f.DeleteChunksNotRecursive(toDelete)
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 )
@@ -31,7 +32,7 @@ func (fsw *FilerStoreWrapper) handleUpdateToHardLinks(ctx context.Context, entry
 
 	// remove old hard link
 	if err == nil && len(existingEntry.HardLinkId) != 0 && bytes.Compare(existingEntry.HardLinkId, entry.HardLinkId) != 0 {
-		glog.V(4).Infof("handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
+		glog.V(4).InfofCtx(ctx, "handleUpdateToHardLinks DeleteHardLink %s", entry.FullPath)
 		if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
 			return err
 		}
@@ -50,7 +51,7 @@ func (fsw *FilerStoreWrapper) setHardLink(ctx context.Context, entry *Entry) err
 		return encodeErr
 	}
 
-	glog.V(4).Infof("setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
+	glog.V(4).InfofCtx(ctx, "setHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
 
 	return fsw.KvPut(ctx, key, newBlob)
 }
@@ -63,16 +64,16 @@ func (fsw *FilerStoreWrapper) maybeReadHardLink(ctx context.Context, entry *Entr
 
 	value, err := fsw.KvGet(ctx, key)
 	if err != nil {
-		glog.Errorf("read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+		glog.ErrorfCtx(ctx, "read %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
 		return err
 	}
 
 	if err = entry.DecodeAttributesAndChunks(value); err != nil {
-		glog.Errorf("decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
+		glog.ErrorfCtx(ctx, "decode %s hardlink %d: %v", entry.FullPath, entry.HardLinkId, err)
 		return err
 	}
 
-	glog.V(4).Infof("maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
+	glog.V(4).InfofCtx(ctx, "maybeReadHardLink %v nlink:%d", entry.FullPath, entry.HardLinkCounter)
 
 	return nil
 }
@@ -94,7 +95,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
 
 	entry.HardLinkCounter--
 	if entry.HardLinkCounter <= 0 {
-		glog.V(4).Infof("DeleteHardLink KvDelete %v", key)
+		glog.V(4).InfofCtx(ctx, "DeleteHardLink KvDelete %v", key)
 		return fsw.KvDelete(ctx, key)
 	}
 
@@ -103,7 +104,7 @@ func (fsw *FilerStoreWrapper) DeleteHardLink(ctx context.Context, hardLinkId Har
 		return encodeErr
 	}
 
-	glog.V(4).Infof("DeleteHardLink KvPut %v", key)
+	glog.V(4).InfofCtx(ctx, "DeleteHardLink KvPut %v", key)
 	return fsw.KvPut(ctx, key, newBlob)

 }
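DeleteHardLink above is plain reference counting over a KV store: decrement the counter, remove the record when it reaches zero, otherwise persist the decremented entry. A self-contained sketch of that shape (the in-memory map stands in for the filer's real KV store; names are illustrative):

	package main

	import "fmt"

	type entry struct {
		nlink int
		data  string
	}

	var kv = map[string]*entry{} // stand-in for the filer's KV store

	func deleteHardLink(key string) {
		e, ok := kv[key]
		if !ok {
			return // already gone
		}
		e.nlink--
		if e.nlink <= 0 {
			delete(kv, key) // last reference: drop the record
			return
		}
		kv[key] = e // write back the decremented counter (mirrors the KvPut)
	}

	func main() {
		kv["link-1"] = &entry{nlink: 2, data: "chunks..."}
		deleteHardLink("link-1")
		fmt.Println(kv["link-1"].nlink) // 1
		deleteHardLink("link-1")
		fmt.Println(kv["link-1"] == nil) // true: record removed at zero
	}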
@@ -192,7 +192,7 @@ func (fsw *FilerStoreWrapper) DeleteEntry(ctx context.Context, fp util.FullPath)
 		// remove hard link
 		op := ctx.Value("OP")
 		if op != "MV" {
-			glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
+			glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
 			if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
 				return err
 			}
@@ -215,7 +215,7 @@ func (fsw *FilerStoreWrapper) DeleteOneEntry(ctx context.Context, existingEntry
 		// remove hard link
 		op := ctx.Value("OP")
 		if op != "MV" {
-			glog.V(4).Infof("DeleteHardLink %s", existingEntry.FullPath)
+			glog.V(4).InfofCtx(ctx, "DeleteHardLink %s", existingEntry.FullPath)
 			if err = fsw.DeleteHardLink(ctx, existingEntry.HardLinkId); err != nil {
 				return err
 			}
@@ -203,7 +203,7 @@ func (store *HbaseStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPa
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(value)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -4,13 +4,14 @@ import (
 	"bytes"
 	"context"
 	"fmt"
+	"io"
+	"os"
+
 	"github.com/syndtr/goleveldb/leveldb"
 	leveldb_errors "github.com/syndtr/goleveldb/leveldb/errors"
 	"github.com/syndtr/goleveldb/leveldb/filter"
 	"github.com/syndtr/goleveldb/leveldb/opt"
 	leveldb_util "github.com/syndtr/goleveldb/leveldb/util"
-	"io"
-	"os"
 
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -205,7 +206,7 @@ func (store *LevelDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 		}
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -213,7 +213,7 @@ func (store *LevelDB2Store) ListDirectoryPrefixedEntries(ctx context.Context, di
 		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -342,7 +342,7 @@ func (store *LevelDB3Store) ListDirectoryPrefixedEntries(ctx context.Context, di
 		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(weed_util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -187,7 +187,7 @@ func (store *MongodbStore) FindEntry(ctx context.Context, fullpath util.FullPath
 	var where = bson.M{"directory": dir, "name": name}
 	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
 	if err != mongo.ErrNoDocuments && err != nil {
-		glog.Errorf("find %s: %v", fullpath, err)
+		glog.ErrorfCtx(ctx, "find %s: %v", fullpath, err)
 		return nil, filer_pb.ErrNotFound
 	}
@@ -234,14 +234,22 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 		"directory": string(dirPath),
 	}

+	nameQuery := bson.M{}
+
 	if len(prefix) > 0 {
-		where["name"].(bson.M)["$regex"] = "^" + regexp.QuoteMeta(prefix)
+		nameQuery["$regex"] = "^" + regexp.QuoteMeta(prefix)
 	}

+	if len(startFileName) > 0 {
 		if includeStartFile {
-			where["name"].(bson.M)["$gte"] = startFileName
+			nameQuery["$gte"] = startFileName
 		} else {
-			where["name"].(bson.M)["$gt"] = startFileName
+			nameQuery["$gt"] = startFileName
+		}
+	}
+
+	if len(nameQuery) > 0 {
+		where["name"] = nameQuery
 	}

 	optLimit := int64(limit)
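
The refactor above collects every constraint on the "name" field into a single nameQuery map and attaches it to the filter only when non-empty, instead of type-asserting where["name"] in place for each of $regex, $gte, and $gt. A standalone sketch of the resulting filter shape (directory and file names are illustrative):

    package main

    import (
    	"fmt"
    	"regexp"

    	"go.mongodb.org/mongo-driver/bson"
    )

    func main() {
    	where := bson.M{"directory": "/buckets/b1"}

    	nameQuery := bson.M{
    		"$regex": "^" + regexp.QuoteMeta("img_"), // prefix match
    		"$gt":    "img_0042.jpg",                 // resume strictly after the last listed name
    	}
    	if len(nameQuery) > 0 {
    		where["name"] = nameQuery
    	}
    	// => map[directory:/buckets/b1 name:map[$gt:img_0042.jpg $regex:^img_]]
    	fmt.Println(where)
    }
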
@@ -264,7 +272,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 		lastFileName = data.Name
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data.Meta)); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
@@ -275,7 +283,7 @@ func (store *MongodbStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 	}

 	if err := cur.Close(ctx); err != nil {
-		glog.V(0).Infof("list iterator close: %v", err)
+		glog.V(0).InfofCtx(ctx, "list iterator close: %v", err)
 	}

 	return lastFileName, err
@@ -3,6 +3,7 @@ package mongodb
 import (
 	"context"
 	"fmt"
+
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"go.mongodb.org/mongo-driver/bson"
@@ -37,7 +38,7 @@ func (store *MongodbStore) KvGet(ctx context.Context, key []byte) (value []byte,
 	var where = bson.M{"directory": dir, "name": name}
 	err = store.connect.Database(store.database).Collection(store.collectionName).FindOne(ctx, where).Decode(&data)
 	if err != mongo.ErrNoDocuments && err != nil {
-		glog.Errorf("kv get: %v", err)
+		glog.ErrorfCtx(ctx, "kv get: %v", err)
 		return nil, filer.ErrKvNotFound
 	}
@@ -47,7 +47,7 @@ func LookupFn(filerClient filer_pb.FilerClient) wdclient.LookupFileIdFunctionTyp
 			locations = resp.LocationsMap[vid]
 			if locations == nil || len(locations.Locations) == 0 {
-				glog.V(0).Infof("failed to locate %s", fileId)
+				glog.V(0).InfofCtx(ctx, "failed to locate %s", fileId)
 				return fmt.Errorf("failed to locate %s", fileId)
 			}
 			vicCacheLock.Lock()
@@ -179,7 +179,7 @@ func (store *UniversalRedisStore) ListDirectoryEntries(ctx context.Context, dirP
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				continue
 			}
@@ -194,7 +194,7 @@ func (store *UniversalRedis2Store) ListDirectoryEntries(ctx context.Context, dir
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				continue
 			}
@@ -3,6 +3,7 @@ package redis3
 import (
 	"context"
 	"fmt"
+
 	"github.com/redis/go-redis/v9"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 )
@@ -31,7 +32,7 @@ func insertChild(ctx context.Context, redisStore *UniversalRedis3Store, key stri
 	nameList := LoadItemList([]byte(data), key, client, store, maxNameBatchSizeLimit)

 	if err := nameList.WriteName(name); err != nil {
-		glog.Errorf("add %s %s: %v", key, name, err)
+		glog.ErrorfCtx(ctx, "add %s %s: %v", key, name, err)
 		return err
 	}
@@ -100,7 +101,7 @@ func removeChildren(ctx context.Context, redisStore *UniversalRedis3Store, key s
 	if err = nameList.ListNames("", func(name string) bool {
 		if err := onDeleteFn(name); err != nil {
-			glog.Errorf("delete %s child %s: %v", key, name, err)
+			glog.ErrorfCtx(ctx, "delete %s child %s: %v", key, name, err)
 			return false
 		}
 		return true
@@ -151,7 +151,7 @@ func (store *UniversalRedis3Store) ListDirectoryEntries(ctx context.Context, dir
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				return true
 			}
@@ -162,7 +162,7 @@ func (store *UniversalRedisLuaStore) ListDirectoryEntries(ctx context.Context, d
 		entry, err := store.FindEntry(ctx, path)
 		lastFileName = fileName
 		if err != nil {
-			glog.V(0).Infof("list %s : %v", path, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", path, err)
 			if err == filer_pb.ErrNotFound {
 				continue
 			}
@@ -266,7 +266,7 @@ func (store *RocksDBStore) ListDirectoryPrefixedEntries(ctx context.Context, dir
 		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(value); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			return false
 		}
 		if !eachEntryFunc(entry) {
@@ -82,7 +82,7 @@ func noJwtFunc(string) string {
 }

 func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclient.HasLookupFileIdFunction, jwtFunc VolumeServerJwtFunction, chunks []*filer_pb.FileChunk, offset int64, size int64, downloadMaxBytesPs int64) (DoStreamContent, error) {
-	glog.V(4).Infof("prepare to stream content for chunks: %d", len(chunks))
+	glog.V(4).InfofCtx(ctx, "prepare to stream content for chunks: %d", len(chunks))
 	chunkViews := ViewFromChunks(ctx, masterClient.GetLookupFileIdFunction(), chunks, offset, size)

 	fileId2Url := make(map[string][]string)
@@ -96,15 +96,15 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
 			if err == nil && len(urlStrings) > 0 {
 				break
 			}
-			glog.V(4).Infof("waiting for chunk: %s", chunkView.FileId)
+			glog.V(4).InfofCtx(ctx, "waiting for chunk: %s", chunkView.FileId)
 			time.Sleep(backoff)
 		}
 		if err != nil {
-			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+			glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 			return nil, err
 		} else if len(urlStrings) == 0 {
 			errUrlNotFound := fmt.Errorf("operation LookupFileId %s failed, err: urls not found", chunkView.FileId)
-			glog.Error(errUrlNotFound)
+			glog.ErrorCtx(ctx, errUrlNotFound)
 			return nil, errUrlNotFound
 		}
 		fileId2Url[chunkView.FileId] = urlStrings
@@ -118,7 +118,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
 		if offset < chunkView.ViewOffset {
 			gap := chunkView.ViewOffset - offset
 			remaining -= gap
-			glog.V(4).Infof("zero [%d,%d)", offset, chunkView.ViewOffset)
+			glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, chunkView.ViewOffset)
 			err := writeZero(writer, gap)
 			if err != nil {
 				return fmt.Errorf("write zero [%d,%d)", offset, chunkView.ViewOffset)
@@ -140,7 +140,7 @@ func PrepareStreamContentWithThrottler(ctx context.Context, masterClient wdclien
 			downloadThrottler.MaybeSlowdown(int64(chunkView.ViewSize))
 		}
 		if remaining > 0 {
-			glog.V(4).Infof("zero [%d,%d)", offset, offset+remaining)
+			glog.V(4).InfofCtx(ctx, "zero [%d,%d)", offset, offset+remaining)
 			err := writeZero(writer, remaining)
 			if err != nil {
 				return fmt.Errorf("write zero [%d,%d)", offset, offset+remaining)
@@ -192,7 +192,7 @@ func ReadAll(ctx context.Context, buffer []byte, masterClient *wdclient.MasterCl
 		chunkView := x.Value
 		urlStrings, err := lookupFileIdFn(ctx, chunkView.FileId)
 		if err != nil {
-			glog.V(1).Infof("operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
+			glog.V(1).InfofCtx(ctx, "operation LookupFileId %s failed, err: %v", chunkView.FileId, err)
 			return err
 		}
@@ -260,39 +260,39 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
 	}

 	if len(results) < 1 {
-		glog.Errorf("Can't find results, data is empty")
+		glog.ErrorfCtx(ctx, "Can't find results, data is empty")
 		return
 	}

 	rows, ok := results[0].([]interface{})
 	if !ok {
-		glog.Errorf("Can't convert results[0] to list")
+		glog.ErrorfCtx(ctx, "Can't convert results[0] to list")
 		return
 	}

 	for _, result := range rows {
 		row, ok := result.([]interface{})
 		if !ok {
-			glog.Errorf("Can't convert result to list")
+			glog.ErrorfCtx(ctx, "Can't convert result to list")
 			return
 		}

 		if len(row) < 5 {
-			glog.Errorf("Length of result is less than needed: %v", len(row))
+			glog.ErrorfCtx(ctx, "Length of result is less than needed: %v", len(row))
 			return
 		}

 		nameRaw := row[2]
 		name, ok := nameRaw.(string)
 		if !ok {
-			glog.Errorf("Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
+			glog.ErrorfCtx(ctx, "Can't convert name field to string. Actual type: %v, value: %v", reflect.TypeOf(nameRaw), nameRaw)
 			return
 		}

 		dataRaw := row[4]
 		data, ok := dataRaw.(string)
 		if !ok {
-			glog.Errorf("Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
+			glog.ErrorfCtx(ctx, "Can't convert data field to string. Actual type: %v, value: %v", reflect.TypeOf(dataRaw), dataRaw)
 			return
 		}
@@ -302,7 +302,7 @@ func (store *TarantoolStore) ListDirectoryEntries(ctx context.Context, dirPath w
 		lastFileName = name
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData([]byte(data))); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if !eachEntryFunc(entry) {
@@ -249,7 +249,7 @@ func (store *TikvStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPat
 		// println("list", entry.FullPath, "chunks", len(entry.GetChunks()))
 		if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(iter.Value())); decodeErr != nil {
 			err = decodeErr
-			glog.V(0).Infof("list %s : %v", entry.FullPath, err)
+			glog.V(0).InfofCtx(ctx, "list %s : %v", entry.FullPath, err)
 			break
 		}
 		if err := iter.Next(); !eachEntryFunc(entry) || err != nil {
@@ -22,19 +22,21 @@ const (
 	deleteQuery = `
 	PRAGMA TablePathPrefix("%v");
 	DECLARE $dir_hash AS int64;
+	DECLARE $directory AS Utf8;
 	DECLARE $name AS Utf8;

 	DELETE FROM ` + asql.DEFAULT_TABLE + `
-	WHERE dir_hash = $dir_hash AND name = $name;`
+	WHERE dir_hash = $dir_hash AND directory = $directory AND name = $name;`

 	findQuery = `
 	PRAGMA TablePathPrefix("%v");
 	DECLARE $dir_hash AS int64;
+	DECLARE $directory AS Utf8;
 	DECLARE $name AS Utf8;

 	SELECT meta
 	FROM ` + asql.DEFAULT_TABLE + `
-	WHERE dir_hash = $dir_hash AND name = $name;`
+	WHERE dir_hash = $dir_hash AND directory = $directory AND name = $name;`

 	deleteFolderChildrenQuery = `
 	PRAGMA TablePathPrefix("%v");
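
The point lookups now filter on directory in addition to dir_hash and name, matching the widened primary key (dir_hash, directory, name) introduced later in this diff. The likely motivation: dir_hash is only a 64-bit hash of the directory path (util.HashStringToLong), so two distinct directories can collide; carrying the directory string makes lookups collision-safe. A sketch of binding the extra parameter, using the same APIs as the hunks above (values are illustrative):

    package ydbsketch

    import (
    	"github.com/seaweedfs/seaweedfs/weed/util"
    	"github.com/ydb-platform/ydb-go-sdk/v3/table"
    	"github.com/ydb-platform/ydb-go-sdk/v3/table/types"
    )

    func exampleParams() *table.QueryParameters {
    	dir := "/buckets/b1" // illustrative directory
    	return table.NewQueryParameters(
    		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(dir))),
    		table.ValueParam("$directory", types.UTF8Value(dir)), // disambiguates hash collisions
    		table.ValueParam("$name", types.UTF8Value("file.txt")),
    	)
    }
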
@@ -12,6 +12,9 @@ import (
 	"sync"
 	"time"

+	"github.com/ydb-platform/ydb-go-sdk/v3/query"
+	"github.com/ydb-platform/ydb-go-sdk/v3/table/options"
+
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
 	"github.com/seaweedfs/seaweedfs/weed/glog"
@@ -20,28 +23,35 @@ import (
 	environ "github.com/ydb-platform/ydb-go-sdk-auth-environ"
 	"github.com/ydb-platform/ydb-go-sdk/v3"
 	"github.com/ydb-platform/ydb-go-sdk/v3/table"
-	"github.com/ydb-platform/ydb-go-sdk/v3/table/result"
-	"github.com/ydb-platform/ydb-go-sdk/v3/table/result/named"
 	"github.com/ydb-platform/ydb-go-sdk/v3/table/types"
 )

 const (
 	defaultDialTimeOut = 10
+	defaultPartitionBySizeEnabled = true
+	defaultPartitionSizeMb        = 200
+	defaultPartitionByLoadEnabled = true
+	defaultMinPartitionsCount     = 5
+	defaultMaxPartitionsCount     = 1000
+	defaultMaxListChunk           = 2000
 )

 var (
-	roTX = table.TxControl(
-		table.BeginTx(table.WithOnlineReadOnly()),
-		table.CommitTx(),
-	)
-	rwTX = table.DefaultTxControl()
+	roQC = query.WithTxControl(query.OnlineReadOnlyTxControl())
+	rwQC = query.WithTxControl(query.DefaultTxControl())
 )

 type YdbStore struct {
-	DB                 ydb.Connection
+	DB                 *ydb.Driver
 	dirBuckets         string
 	tablePathPrefix    string
 	SupportBucketTable bool
+	partitionBySizeEnabled options.FeatureFlag
+	partitionSizeMb        uint64
+	partitionByLoadEnabled options.FeatureFlag
+	minPartitionsCount     uint64
+	maxPartitionsCount     uint64
+	maxListChunk           int
 	dbs                map[string]bool
 	dbsLock            sync.Mutex
 }
@@ -55,6 +65,12 @@ func (store *YdbStore) GetName() string {
 }

 func (store *YdbStore) Initialize(configuration util.Configuration, prefix string) (err error) {
+	configuration.SetDefault(prefix+"partitionBySizeEnabled", defaultPartitionBySizeEnabled)
+	configuration.SetDefault(prefix+"partitionSizeMb", defaultPartitionSizeMb)
+	configuration.SetDefault(prefix+"partitionByLoadEnabled", defaultPartitionByLoadEnabled)
+	configuration.SetDefault(prefix+"minPartitionsCount", defaultMinPartitionsCount)
+	configuration.SetDefault(prefix+"maxPartitionsCount", defaultMaxPartitionsCount)
+	configuration.SetDefault(prefix+"maxListChunk", defaultMaxListChunk)
 	return store.initialize(
 		configuration.GetString("filer.options.buckets_folder"),
 		configuration.GetString(prefix+"dsn"),
@@ -62,18 +78,37 @@ func (store *YdbStore) Initialize(configuration util.Configuration, prefix strin
 		configuration.GetBool(prefix+"useBucketPrefix"),
 		configuration.GetInt(prefix+"dialTimeOut"),
 		configuration.GetInt(prefix+"poolSizeLimit"),
+		configuration.GetBool(prefix+"partitionBySizeEnabled"),
+		uint64(configuration.GetInt(prefix+"partitionSizeMb")),
+		configuration.GetBool(prefix+"partitionByLoadEnabled"),
+		uint64(configuration.GetInt(prefix+"minPartitionsCount")),
+		uint64(configuration.GetInt(prefix+"maxPartitionsCount")),
+		configuration.GetInt(prefix+"maxListChunk"),
 	)
 }

-func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix string, useBucketPrefix bool, dialTimeOut int, poolSizeLimit int) (err error) {
+func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix string, useBucketPrefix bool, dialTimeOut int, poolSizeLimit int, partitionBySizeEnabled bool, partitionSizeMb uint64, partitionByLoadEnabled bool, minPartitionsCount uint64, maxPartitionsCount uint64, maxListChunk int) (err error) {
 	store.dirBuckets = dirBuckets
 	store.SupportBucketTable = useBucketPrefix
+	if partitionBySizeEnabled {
+		store.partitionBySizeEnabled = options.FeatureEnabled
+	} else {
+		store.partitionBySizeEnabled = options.FeatureDisabled
+	}
+	if partitionByLoadEnabled {
+		store.partitionByLoadEnabled = options.FeatureEnabled
+	} else {
+		store.partitionByLoadEnabled = options.FeatureDisabled
+	}
+	store.partitionSizeMb = partitionSizeMb
+	store.minPartitionsCount = minPartitionsCount
+	store.maxPartitionsCount = maxPartitionsCount
+	store.maxListChunk = maxListChunk
 	if store.SupportBucketTable {
 		glog.V(0).Infof("enabled BucketPrefix")
 	}
 	store.dbs = make(map[string]bool)
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	ctx := context.Background()
 	if dialTimeOut == 0 {
 		dialTimeOut = defaultDialTimeOut
 	}
@@ -89,11 +124,7 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
 	}
 	store.DB, err = ydb.Open(ctx, dsn, opts...)
 	if err != nil {
-		if store.DB != nil {
-			_ = store.DB.Close(ctx)
-			store.DB = nil
-		}
-		return fmt.Errorf("can not connect to %s error: %v", dsn, err)
+		return fmt.Errorf("can not connect to %s: %w", dsn, err)
 	}

 	store.tablePathPrefix = path.Join(store.DB.Name(), tablePathPrefix)
@@ -104,29 +135,27 @@ func (store *YdbStore) initialize(dirBuckets string, dsn string, tablePathPrefix
 		return err
 	}

-func (store *YdbStore) doTxOrDB(ctx context.Context, query *string, params *table.QueryParameters, tc *table.TransactionControl, processResultFunc func(res result.Result) error) (err error) {
-	var res result.Result
-	if tx, ok := ctx.Value("tx").(table.Transaction); ok {
-		res, err = tx.Execute(ctx, *query, params)
+func (store *YdbStore) doTxOrDB(ctx context.Context, q *string, params *table.QueryParameters, ts query.ExecuteOption, processResultFunc func(res query.Result) error) (err error) {
+	var res query.Result
+	if tx, ok := ctx.Value("tx").(query.Transaction); ok {
+		res, err = tx.Query(ctx, *q, query.WithParameters(params))
 		if err != nil {
 			return fmt.Errorf("execute transaction: %v", err)
 		}
 	} else {
-		err = store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) {
-			_, res, err = s.Execute(ctx, tc, *query, params)
+		err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
+			res, err = s.Query(ctx, *q, query.WithParameters(params), ts)
 			if err != nil {
 				return fmt.Errorf("execute statement: %v", err)
 			}
 			return nil
-		},
-			table.WithIdempotent(),
-		)
+		}, query.WithIdempotent())
 	}
 	if err != nil {
 		return err
 	}
 	if res != nil {
-		defer func() { _ = res.Close() }()
+		defer func() { _ = res.Close(ctx) }()
 		if processResultFunc != nil {
 			if err = processResultFunc(res); err != nil {
 				return fmt.Errorf("process result: %v", err)
|
||||||
}
|
}
|
||||||
tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
|
tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
|
||||||
fileMeta := FileMeta{util.HashStringToLong(dir), name, *shortDir, meta}
|
fileMeta := FileMeta{util.HashStringToLong(dir), name, *shortDir, meta}
|
||||||
return store.doTxOrDB(ctx, withPragma(tablePathPrefix, upsertQuery), fileMeta.queryParameters(entry.TtlSec), rwTX, nil)
|
return store.doTxOrDB(ctx, withPragma(tablePathPrefix, upsertQuery), fileMeta.queryParameters(entry.TtlSec), rwQC, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (store *YdbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
|
func (store *YdbStore) InsertEntry(ctx context.Context, entry *filer.Entry) (err error) {
|
||||||
|
@ -164,23 +193,29 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e
|
||||||
var data []byte
|
var data []byte
|
||||||
entryFound := false
|
entryFound := false
|
||||||
tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
|
tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
|
||||||
query := withPragma(tablePathPrefix, findQuery)
|
q := withPragma(tablePathPrefix, findQuery)
|
||||||
queryParams := table.NewQueryParameters(
|
queryParams := table.NewQueryParameters(
|
||||||
table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
|
table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
|
||||||
|
table.ValueParam("$directory", types.UTF8Value(*shortDir)),
|
||||||
table.ValueParam("$name", types.UTF8Value(name)))
|
table.ValueParam("$name", types.UTF8Value(name)))
|
||||||
|
|
||||||
err = store.doTxOrDB(ctx, query, queryParams, roTX, func(res result.Result) error {
|
err = store.doTxOrDB(ctx, q, queryParams, roQC, func(res query.Result) error {
|
||||||
if !res.NextResultSet(ctx) || !res.HasNextRow() {
|
for rs, err := range res.ResultSets(ctx) {
|
||||||
return nil
|
if err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
for res.NextRow() {
|
for row, err := range rs.Rows(ctx) {
|
||||||
if err = res.ScanNamed(named.OptionalWithDefault("meta", &data)); err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("scanNamed %s : %v", fullpath, err)
|
return err
|
||||||
|
}
|
||||||
|
if scanErr := row.Scan(&data); scanErr != nil {
|
||||||
|
return fmt.Errorf("scan %s: %v", fullpath, scanErr)
|
||||||
}
|
}
|
||||||
entryFound = true
|
entryFound = true
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return res.Err()
|
}
|
||||||
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
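
Result consumption switches from the cursor style (NextResultSet/NextRow/ScanNamed) to the query service's range-over-func iterators (Go 1.23-style), where each iteration yields a value together with an error. A reduced sketch of the iteration shape used above:

    package ydbsketch

    import (
    	"context"

    	"github.com/ydb-platform/ydb-go-sdk/v3/query"
    )

    // readMeta drains a query.Result with the iterators used in FindEntry,
    // scanning a single []byte column positionally from each row.
    func readMeta(ctx context.Context, res query.Result) ([]byte, error) {
    	var meta []byte
    	for rs, err := range res.ResultSets(ctx) { // iterate result sets
    		if err != nil {
    			return nil, err
    		}
    		for row, err := range rs.Rows(ctx) { // iterate rows in each set
    			if err != nil {
    				return nil, err
    			}
    			if scanErr := row.Scan(&meta); scanErr != nil {
    				return nil, scanErr
    			}
    		}
    	}
    	return meta, nil
    }
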
@@ -189,37 +224,35 @@ func (store *YdbStore) FindEntry(ctx context.Context, fullpath util.FullPath) (e
 		return nil, filer_pb.ErrNotFound
 	}

-	entry = &filer.Entry{
-		FullPath: fullpath,
-	}
-	if err := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
-		return nil, fmt.Errorf("decode %s : %v", fullpath, err)
+	entry = &filer.Entry{FullPath: fullpath}
+	if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
+		return nil, fmt.Errorf("decode %s: %v", fullpath, decodeErr)
 	}

 	return entry, nil
 }

 func (store *YdbStore) DeleteEntry(ctx context.Context, fullpath util.FullPath) (err error) {
 	dir, name := fullpath.DirAndName()
 	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
-	query := withPragma(tablePathPrefix, deleteQuery)
-	glog.V(4).Infof("DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
+	q := withPragma(tablePathPrefix, deleteQuery)
+	glog.V(4).InfofCtx(ctx, "DeleteEntry %s, tablePathPrefix %s, shortDir %s", fullpath, *tablePathPrefix, *shortDir)
 	queryParams := table.NewQueryParameters(
 		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
+		table.ValueParam("$directory", types.UTF8Value(*shortDir)),
 		table.ValueParam("$name", types.UTF8Value(name)))

-	return store.doTxOrDB(ctx, query, queryParams, rwTX, nil)
+	return store.doTxOrDB(ctx, q, queryParams, rwQC, nil)
 }

 func (store *YdbStore) DeleteFolderChildren(ctx context.Context, fullpath util.FullPath) (err error) {
 	dir := string(fullpath)
 	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
-	query := withPragma(tablePathPrefix, deleteFolderChildrenQuery)
+	q := withPragma(tablePathPrefix, deleteFolderChildrenQuery)
 	queryParams := table.NewQueryParameters(
 		table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
 		table.ValueParam("$directory", types.UTF8Value(*shortDir)))

-	return store.doTxOrDB(ctx, query, queryParams, rwTX, nil)
+	return store.doTxOrDB(ctx, q, queryParams, rwQC, nil)
 }

 func (store *YdbStore) ListDirectoryEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
@@ -229,72 +262,80 @@ func (store *YdbStore) ListDirectoryEntries(ctx context.Context, dirPath util.Fu
 func (store *YdbStore) ListDirectoryPrefixedEntries(ctx context.Context, dirPath util.FullPath, startFileName string, includeStartFile bool, limit int64, prefix string, eachEntryFunc filer.ListEachEntryFunc) (lastFileName string, err error) {
 	dir := string(dirPath)
 	tablePathPrefix, shortDir := store.getPrefix(ctx, &dir)
-	var query *string
-	if includeStartFile {
-		query = withPragma(tablePathPrefix, listInclusiveDirectoryQuery)
-	} else {
-		query = withPragma(tablePathPrefix, listDirectoryQuery)
-	}
-	truncated := true
-	eachEntryFuncIsNotBreake := true
-	entryCount := int64(0)
-	for truncated && eachEntryFuncIsNotBreake {
-		if lastFileName != "" {
-			startFileName = lastFileName
-			if includeStartFile {
-				query = withPragma(tablePathPrefix, listDirectoryQuery)
-			}
-		}
-		restLimit := limit - entryCount
-		const maxChunk = int64(1000)
-		chunkLimit := restLimit
-		if chunkLimit > maxChunk {
-			chunkLimit = maxChunk
-		}
-		glog.V(4).Infof("startFileName %s, restLimit %d, chunkLimit %d", startFileName, restLimit, chunkLimit)
+	baseInclusive := withPragma(tablePathPrefix, listInclusiveDirectoryQuery)
+	baseExclusive := withPragma(tablePathPrefix, listDirectoryQuery)
+	var entryCount int64
+	var prevFetchedLessThanChunk bool
+	for entryCount < limit {
+		if prevFetchedLessThanChunk {
+			break
+		}
+		var q *string
+		if entryCount == 0 && includeStartFile {
+			q = baseInclusive
+		} else {
+			q = baseExclusive
+		}
+		rest := limit - entryCount
+		chunkLimit := rest
+		if chunkLimit > int64(store.maxListChunk) {
+			chunkLimit = int64(store.maxListChunk)
+		}
+		var rowCount int64

-		queryParams := table.NewQueryParameters(
+		params := table.NewQueryParameters(
 			table.ValueParam("$dir_hash", types.Int64Value(util.HashStringToLong(*shortDir))),
 			table.ValueParam("$directory", types.UTF8Value(*shortDir)),
 			table.ValueParam("$start_name", types.UTF8Value(startFileName)),
 			table.ValueParam("$prefix", types.UTF8Value(prefix+"%")),
 			table.ValueParam("$limit", types.Uint64Value(uint64(chunkLimit))),
 		)
-		err = store.doTxOrDB(ctx, query, queryParams, roTX, func(res result.Result) error {
-			var name string
-			var data []byte
-			if !res.NextResultSet(ctx) || !res.HasNextRow() {
-				truncated = false
-				return nil
-			}
-			truncated = res.CurrentResultSet().Truncated()
-			glog.V(4).Infof("truncated %v, entryCount %d", truncated, entryCount)
-			for res.NextRow() {
-				if err := res.ScanNamed(
-					named.OptionalWithDefault("name", &name),
-					named.OptionalWithDefault("meta", &data)); err != nil {
-					return fmt.Errorf("list scanNamed %s : %v", dir, err)
-				}
-				glog.V(8).Infof("name %s, fullpath %s", name, util.NewFullPath(dir, name))
-				lastFileName = name
-				entry := &filer.Entry{
-					FullPath: util.NewFullPath(dir, name),
-				}
-				if err = entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); err != nil {
-					return fmt.Errorf("scan decode %s : %v", entry.FullPath, err)
-				}
-				if !eachEntryFunc(entry) {
-					eachEntryFuncIsNotBreake = false
-					break
-				}
-				entryCount += 1
-			}
-			return res.Err()
-		})
-	}
+		err := store.doTxOrDB(ctx, q, params, roQC, func(res query.Result) error {
+			for rs, err := range res.ResultSets(ctx) {
+				if err != nil {
+					return err
+				}
+				for row, err := range rs.Rows(ctx) {
+					if err != nil {
+						return err
+					}
+					var name string
+					var data []byte
+					if scanErr := row.Scan(&name, &data); scanErr != nil {
+						return fmt.Errorf("scan %s: %w", dir, scanErr)
+					}
+					lastFileName = name
+					entry := &filer.Entry{FullPath: util.NewFullPath(dir, name)}
+					if decodeErr := entry.DecodeAttributesAndChunks(util.MaybeDecompressData(data)); decodeErr != nil {
+						return fmt.Errorf("decode entry %s: %w", entry.FullPath, decodeErr)
+					}
+					if !eachEntryFunc(entry) {
+						return nil
+					}
+					rowCount++
+					entryCount++
+					startFileName = lastFileName
+					if entryCount >= limit {
+						return nil
+					}
+				}
+			}
+			return nil
+		})
 		if err != nil {
 			return lastFileName, err
 		}
+		if rowCount < chunkLimit {
+			prevFetchedLessThanChunk = true
+		}
+	}
 	return lastFileName, nil
 }
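
The rewritten listing pages through results in chunks of store.maxListChunk (configurable, default 2000) instead of the old hard-coded 1000, and it terminates when a chunk comes back short (rowCount < chunkLimit) rather than relying on the legacy Truncated() flag. A reduced, runnable sketch of just the termination logic, with fetchChunk standing in for the YQL round-trip (hypothetical helper, in-memory data):

    package main

    import "fmt"

    // fetchChunk stands in for one paged query: it returns up to chunkLimit
    // names strictly after the cursor. Hypothetical helper for illustration.
    func fetchChunk(cursor string, chunkLimit int64) []string {
    	all := []string{"a", "b", "c", "d", "e"}
    	var out []string
    	for _, n := range all {
    		if n > cursor && int64(len(out)) < chunkLimit {
    			out = append(out, n)
    		}
    	}
    	return out
    }

    func main() {
    	const limit, chunkLimit = int64(10), int64(2)
    	var entryCount int64
    	cursor := ""
    	for entryCount < limit {
    		rows := fetchChunk(cursor, chunkLimit)
    		for _, name := range rows {
    			cursor = name // advance the start key for the next chunk
    			entryCount++
    		}
    		if int64(len(rows)) < chunkLimit {
    			break // short chunk: no more data
    		}
    	}
    	fmt.Println("listed", entryCount, "entries") // prints: listed 5 entries
    }
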
@@ -380,7 +421,7 @@ func (store *YdbStore) OnBucketDeletion(bucket string) {

 func (store *YdbStore) createTable(ctx context.Context, prefix string) error {
 	return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
-		return s.CreateTable(ctx, path.Join(prefix, abstract_sql.DEFAULT_TABLE), createTableOptions()...)
+		return s.CreateTable(ctx, path.Join(prefix, abstract_sql.DEFAULT_TABLE), store.createTableOptions()...)
 	})
 }
@@ -393,7 +434,7 @@ func (store *YdbStore) deleteTable(ctx context.Context, prefix string) error {
 	}); err != nil {
 		return err
 	}
-	glog.V(4).Infof("deleted table %s", prefix)
+	glog.V(4).InfofCtx(ctx, "deleted table %s", prefix)

 	return nil
 }
|
||||||
}
|
}
|
||||||
|
|
||||||
prefixBuckets := store.dirBuckets + "/"
|
prefixBuckets := store.dirBuckets + "/"
|
||||||
glog.V(4).Infof("dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
|
glog.V(4).InfofCtx(ctx, "dir: %s, prefixBuckets: %s", *dir, prefixBuckets)
|
||||||
if strings.HasPrefix(*dir, prefixBuckets) {
|
if strings.HasPrefix(*dir, prefixBuckets) {
|
||||||
// detect bucket
|
// detect bucket
|
||||||
bucketAndDir := (*dir)[len(prefixBuckets):]
|
bucketAndDir := (*dir)[len(prefixBuckets):]
|
||||||
glog.V(4).Infof("bucketAndDir: %s", bucketAndDir)
|
glog.V(4).InfofCtx(ctx, "bucketAndDir: %s", bucketAndDir)
|
||||||
var bucket string
|
var bucket string
|
||||||
if t := strings.Index(bucketAndDir, "/"); t > 0 {
|
if t := strings.Index(bucketAndDir, "/"); t > 0 {
|
||||||
bucket = bucketAndDir[:t]
|
bucket = bucketAndDir[:t]
|
||||||
|
@@ -424,16 +465,22 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
 		store.dbsLock.Lock()
 		defer store.dbsLock.Unlock()

-		tablePathPrefixWithBucket := path.Join(store.tablePathPrefix, bucket)
 		if _, found := store.dbs[bucket]; !found {
-			if err := store.createTable(ctx, tablePathPrefixWithBucket); err == nil {
-				store.dbs[bucket] = true
-				glog.V(4).Infof("created table %s", tablePathPrefixWithBucket)
-			} else {
-				glog.Errorf("createTable %s: %v", tablePathPrefixWithBucket, err)
-			}
+			glog.V(4).InfofCtx(ctx, "bucket %q not in cache, verifying existence via DescribeTable", bucket)
+			tablePath := path.Join(store.tablePathPrefix, bucket, abstract_sql.DEFAULT_TABLE)
+			err2 := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
+				_, err3 := s.DescribeTable(ctx, tablePath)
+				return err3
+			})
+			if err2 != nil {
+				glog.V(4).InfofCtx(ctx, "bucket %q not found (DescribeTable %s failed)", bucket, tablePath)
+				return
+			}
+			glog.V(4).InfofCtx(ctx, "bucket %q exists, adding to cache", bucket)
+			store.dbs[bucket] = true
 		}
-		tablePathPrefix = &tablePathPrefixWithBucket
+		bucketPrefix := path.Join(store.tablePathPrefix, bucket)
+		tablePathPrefix = &bucketPrefix
 	}
 	return
 }
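
getPrefix previously tried to create the bucket table as a side effect of a path lookup; it now only verifies that the table exists (DescribeTable) and caches the answer, presumably leaving creation to the explicit table-management paths such as ensureTables. A sketch of the existence probe in isolation, assuming an open *ydb.Driver (a real caller may want to distinguish scheme errors from transport errors):

    package ydbsketch

    import (
    	"context"

    	"github.com/ydb-platform/ydb-go-sdk/v3"
    	"github.com/ydb-platform/ydb-go-sdk/v3/table"
    )

    // tableExists probes for a table by describing it; any DescribeTable
    // failure is treated as "missing" in this sketch.
    func tableExists(ctx context.Context, db *ydb.Driver, tablePath string) bool {
    	err := db.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
    		_, err := s.DescribeTable(ctx, tablePath)
    		return err
    	})
    	return err == nil
    }
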
@@ -441,25 +488,25 @@ func (store *YdbStore) getPrefix(ctx context.Context, dir *string) (tablePathPre
 func (store *YdbStore) ensureTables(ctx context.Context) error {
 	prefixFull := store.tablePathPrefix

-	glog.V(4).Infof("creating base table %s", prefixFull)
+	glog.V(4).InfofCtx(ctx, "creating base table %s", prefixFull)
 	baseTable := path.Join(prefixFull, abstract_sql.DEFAULT_TABLE)
 	if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
-		return s.CreateTable(ctx, baseTable, createTableOptions()...)
+		return s.CreateTable(ctx, baseTable, store.createTableOptions()...)
 	}); err != nil {
 		return fmt.Errorf("failed to create base table %s: %v", baseTable, err)
 	}

-	glog.V(4).Infof("creating bucket tables")
+	glog.V(4).InfofCtx(ctx, "creating bucket tables")
 	if store.SupportBucketTable {
 		store.dbsLock.Lock()
 		defer store.dbsLock.Unlock()
 		for bucket := range store.dbs {
-			glog.V(4).Infof("creating bucket table %s", bucket)
+			glog.V(4).InfofCtx(ctx, "creating bucket table %s", bucket)
 			bucketTable := path.Join(prefixFull, bucket, abstract_sql.DEFAULT_TABLE)
 			if err := store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
-				return s.CreateTable(ctx, bucketTable, createTableOptions()...)
+				return s.CreateTable(ctx, bucketTable, store.createTableOptions()...)
 			}); err != nil {
-				glog.Errorf("failed to create bucket table %s: %v", bucketTable, err)
+				glog.ErrorfCtx(ctx, "failed to create bucket table %s: %v", bucketTable, err)
 			}
 		}
 	}
@@ -9,48 +9,54 @@ import (
 	"github.com/seaweedfs/seaweedfs/weed/filer"
 	"github.com/seaweedfs/seaweedfs/weed/filer/abstract_sql"
 	"github.com/seaweedfs/seaweedfs/weed/util"
+	"github.com/ydb-platform/ydb-go-sdk/v3/query"
 	"github.com/ydb-platform/ydb-go-sdk/v3/table"
-	"github.com/ydb-platform/ydb-go-sdk/v3/table/result/named"
 	"github.com/ydb-platform/ydb-go-sdk/v3/table/types"
 )

 func (store *YdbStore) KvPut(ctx context.Context, key []byte, value []byte) (err error) {
 	dirStr, dirHash, name := abstract_sql.GenDirAndName(key)
 	fileMeta := FileMeta{dirHash, name, dirStr, value}
-	return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) {
-		_, _, err = s.Execute(ctx, rwTX, *withPragma(&store.tablePathPrefix, upsertQuery),
-			fileMeta.queryParameters(0))
+	return store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
+		_, err = s.Query(ctx, *withPragma(&store.tablePathPrefix, upsertQuery),
+			query.WithParameters(fileMeta.queryParameters(0)), rwQC)
 		if err != nil {
 			return fmt.Errorf("kv put execute %s: %v", util.NewFullPath(dirStr, name).Name(), err)
 		}
 		return nil
-	})
+	}, query.WithIdempotent())
 }

 func (store *YdbStore) KvGet(ctx context.Context, key []byte) (value []byte, err error) {
 	dirStr, dirHash, name := abstract_sql.GenDirAndName(key)
 	valueFound := false
-	err = store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) error {
-		_, res, err := s.Execute(ctx, roTX, *withPragma(&store.tablePathPrefix, findQuery),
-			table.NewQueryParameters(
-				table.ValueParam("$dir_hash", types.Int64Value(dirHash)),
-				table.ValueParam("$name", types.UTF8Value(name))))
+	err = store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) error {
+		res, err := s.Query(ctx, *withPragma(&store.tablePathPrefix, findQuery),
+			query.WithParameters(table.NewQueryParameters(
+				table.ValueParam("$dir_hash", types.Int64Value(dirHash)),
+				table.ValueParam("$directory", types.UTF8Value(dirStr)),
+				table.ValueParam("$name", types.UTF8Value(name)))), roQC)
 		if err != nil {
 			return fmt.Errorf("kv get execute %s: %v", util.NewFullPath(dirStr, name).Name(), err)
 		}
-		defer func() { _ = res.Close() }()
-		if !res.NextResultSet(ctx) || !res.HasNextRow() {
-			return nil
-		}
-		for res.NextRow() {
-			if err := res.ScanNamed(named.OptionalWithDefault("meta", &value)); err != nil {
-				return fmt.Errorf("scanNamed %s : %v", util.NewFullPath(dirStr, name).Name(), err)
+		defer func() { _ = res.Close(ctx) }()
+		for rs, err := range res.ResultSets(ctx) {
+			if err != nil {
+				return err
+			}
+			for row, err := range rs.Rows(ctx) {
+				if err != nil {
+					return err
+				}
+				if err := row.Scan(&value); err != nil {
+					return fmt.Errorf("scan %s : %v", util.NewFullPath(dirStr, name).Name(), err)
+				}
+				valueFound = true
+				return nil
 			}
-			valueFound = true
-			return nil
 		}
-		return res.Err()
-	})
+		return nil
+	}, query.WithIdempotent())

 	if !valueFound {
 		return nil, filer.ErrKvNotFound

@@ -61,15 +67,16 @@ func (store *YdbStore) KvGet(ctx context.Context, key []byte) (value []byte, err

 func (store *YdbStore) KvDelete(ctx context.Context, key []byte) (err error) {
 	dirStr, dirHash, name := abstract_sql.GenDirAndName(key)
-	return store.DB.Table().Do(ctx, func(ctx context.Context, s table.Session) (err error) {
-		_, _, err = s.Execute(ctx, rwTX, *withPragma(&store.tablePathPrefix, deleteQuery),
-			table.NewQueryParameters(
-				table.ValueParam("$dir_hash", types.Int64Value(dirHash)),
-				table.ValueParam("$name", types.UTF8Value(name))))
+	return store.DB.Query().Do(ctx, func(ctx context.Context, s query.Session) (err error) {
+		_, err = s.Query(ctx, *withPragma(&store.tablePathPrefix, deleteQuery),
+			query.WithParameters(table.NewQueryParameters(
+				table.ValueParam("$dir_hash", types.Int64Value(dirHash)),
+				table.ValueParam("$directory", types.UTF8Value(dirStr)),
+				table.ValueParam("$name", types.UTF8Value(name)))), rwQC)
 		if err != nil {
 			return fmt.Errorf("kv delete %s: %v", util.NewFullPath(dirStr, name).Name(), err)
 		}
 		return nil
-	})
+	}, query.WithIdempotent())
 }
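
All three KV paths now run under the query client with query.WithIdempotent, so transient failures retry safely, and KvGet/KvDelete bind the new $directory parameter to match the widened key. A consumer-side sketch, written as if inside the same ydb store package:

    package ydb

    import "context"

    // kvRoundTrip exercises the three KV entry points above; the key and
    // value are illustrative only.
    func kvRoundTrip(ctx context.Context, store *YdbStore) error {
    	key := []byte("example-key")
    	if err := store.KvPut(ctx, key, []byte("example-value")); err != nil {
    		return err
    	}
    	if _, err := store.KvGet(ctx, key); err != nil {
    		return err
    	}
    	return store.KvDelete(ctx, key)
    }
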
@@ -13,7 +13,8 @@ func TestStore(t *testing.T) {
 	// to set up local env
 	if false {
 		store := &YdbStore{}
-		store.initialize("/buckets", "grpc://localhost:2136/?database=local", "seaweedfs", true, 10, 50)
+		store.initialize("/buckets", "grpc://localhost:2136/?database=local", "seaweedfs", true, 10, 50,
+			true, 200, true, 5, 1000, 2000)
 		store_test.TestFilerStore(t, store)
 	}
 }
@@ -30,26 +30,35 @@ func (fm *FileMeta) queryParameters(ttlSec int32) *table.QueryParameters {
 		table.ValueParam("$dir_hash", types.Int64Value(fm.DirHash)),
 		table.ValueParam("$directory", types.UTF8Value(fm.Directory)),
 		table.ValueParam("$name", types.UTF8Value(fm.Name)),
-		table.ValueParam("$meta", types.StringValue(fm.Meta)),
+		table.ValueParam("$meta", types.BytesValue(fm.Meta)),
 		table.ValueParam("$expire_at", expireAtValue))
 }

-func createTableOptions() []options.CreateTableOption {
+func (store *YdbStore) createTableOptions() []options.CreateTableOption {
 	columnUnit := options.TimeToLiveUnitSeconds
 	return []options.CreateTableOption{
-		options.WithColumn("dir_hash", types.Optional(types.TypeInt64)),
-		options.WithColumn("directory", types.Optional(types.TypeUTF8)),
-		options.WithColumn("name", types.Optional(types.TypeUTF8)),
-		options.WithColumn("meta", types.Optional(types.TypeString)),
+		options.WithColumn("dir_hash", types.TypeInt64),
+		options.WithColumn("directory", types.TypeUTF8),
+		options.WithColumn("name", types.TypeUTF8),
+		options.WithColumn("meta", types.TypeString),
 		options.WithColumn("expire_at", types.Optional(types.TypeUint32)),
-		options.WithPrimaryKeyColumn("dir_hash", "name"),
+		options.WithPrimaryKeyColumn("dir_hash", "directory", "name"),
 		options.WithTimeToLiveSettings(options.TimeToLiveSettings{
 			ColumnName: "expire_at",
 			ColumnUnit: &columnUnit,
 			Mode:       options.TimeToLiveModeValueSinceUnixEpoch},
 		),
+		options.WithPartitioningSettings(
+			options.WithPartitioningBy([]string{"dir_hash", "name"}),
+			options.WithPartitioningBySize(store.partitionBySizeEnabled),
+			options.WithPartitionSizeMb(store.partitionSizeMb),
+			options.WithPartitioningByLoad(store.partitionByLoadEnabled),
+			options.WithMinPartitionsCount(store.minPartitionsCount),
+			options.WithMaxPartitionsCount(store.maxPartitionsCount),
+		),
 	}
 }

 func withPragma(prefix *string, query string) *string {
 	queryWithPragma := fmt.Sprintf(query, *prefix)
 	return &queryWithPragma
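
createTableOptions becomes a method so table creation can honor the per-store partitioning knobs that Initialize reads from configuration. With the defaults wired in above, a partition splits once it passes 200 MB, hot partitions also split under load, and the partition count stays between 5 and 1000. A sketch of the same settings expressed directly (option names taken from this diff, values are the defaults):

    package ydbsketch

    import (
    	"github.com/ydb-platform/ydb-go-sdk/v3/table/options"
    )

    // partitioningDefaults mirrors the default auto-partitioning knobs
    // applied at CreateTable time in the store above.
    func partitioningDefaults() options.CreateTableOption {
    	return options.WithPartitioningSettings(
    		options.WithPartitioningBy([]string{"dir_hash", "name"}),
    		options.WithPartitioningBySize(options.FeatureEnabled), // split past the size cap
    		options.WithPartitionSizeMb(200),
    		options.WithPartitioningByLoad(options.FeatureEnabled), // split hot partitions
    		options.WithMinPartitionsCount(5),
    		options.WithMaxPartitionsCount(1000),
    	)
    }
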
weed/glog/glog_ctx.go (new file, 246 lines)
@@ -0,0 +1,246 @@
+package glog
+
+import (
+	"context"
+	"fmt"
+	"sync/atomic"
+
+	reqid "github.com/seaweedfs/seaweedfs/weed/util/request_id"
+)
+
+const requestIDField = "request_id"
+
+// formatMetaTag returns a formatted request ID tag from the context,
+// like "request_id:abc123". Returns an empty string if no request ID is found.
+func formatMetaTag(ctx context.Context) string {
+	if requestID := reqid.Get(ctx); requestID != "" {
+		return fmt.Sprintf("%s:%s", requestIDField, requestID)
+	}
+	return ""
+}
+
+// InfoCtx is a context-aware alternative to Verbose.Info.
+// Logs to the INFO log, guarded by the value of v, and prepends a request ID from the context if present.
+// Arguments are handled in the manner of fmt.Print.
+func (v Verbose) InfoCtx(ctx context.Context, args ...interface{}) {
+	if !v {
+		return
+	}
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		args = append([]interface{}{metaTag}, args...)
+	}
+	logging.print(infoLog, args...)
+}
+
+// InfolnCtx is a context-aware alternative to Verbose.Infoln.
+// Logs to the INFO log, prepending a request ID from the context if it exists.
+// Arguments are handled in the manner of fmt.Println.
+func (v Verbose) InfolnCtx(ctx context.Context, args ...interface{}) {
+	if !v {
+		return
+	}
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		args = append([]interface{}{metaTag}, args...)
+	}
+	logging.println(infoLog, args...)
+}
+
+// InfofCtx is a context-aware alternative to Verbose.Infof.
+// Logs to the INFO log, guarded by the value of v, and prepends a request ID from the context if present.
+// Arguments are handled in the manner of fmt.Printf.
+func (v Verbose) InfofCtx(ctx context.Context, format string, args ...interface{}) {
+	if !v {
+		return
+	}
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		format = metaTag + " " + format
+	}
+	logging.printf(infoLog, format, args...)
+}
+
+// InfofCtx logs a formatted message at info level, prepending a request ID from
+// the context if it exists. This is a context-aware alternative to Infof.
+func InfofCtx(ctx context.Context, format string, args ...interface{}) {
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		format = metaTag + " " + format
+	}
+	logging.printf(infoLog, format, args...)
+}
+
+// InfoCtx logs a message at info level, prepending a request ID from the context
+// if it exists. This is a context-aware alternative to Info.
+func InfoCtx(ctx context.Context, args ...interface{}) {
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		args = append([]interface{}{metaTag}, args...)
+	}
+	logging.print(infoLog, args...)
+}
+
+// WarningCtx logs to the WARNING and INFO logs.
+// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
+// This is a context-aware alternative to Warning.
+func WarningCtx(ctx context.Context, args ...interface{}) {
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		args = append([]interface{}{metaTag}, args...)
+	}
+	logging.print(warningLog, args...)
+}
+
+// WarningDepthCtx logs to the WARNING and INFO logs with a custom call depth.
+// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
+// This is a context-aware alternative to WarningDepth.
+func WarningDepthCtx(ctx context.Context, depth int, args ...interface{}) {
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
+		args = append([]interface{}{metaTag}, args...)
+	}
+	logging.printDepth(warningLog, depth, args...)
+}
+
+// WarninglnCtx logs to the WARNING and INFO logs.
+// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
+// This is a context-aware alternative to Warningln.
+func WarninglnCtx(ctx context.Context, args ...interface{}) {
+	if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.println(warningLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WarningfCtx logs to the WARNING and INFO logs.
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
|
||||||
|
// This is a context-aware alternative to Warningf.
|
||||||
|
func WarningfCtx(ctx context.Context, format string, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
format = metaTag + " " + format
|
||||||
|
}
|
||||||
|
logging.printf(warningLog, format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorCtx logs to the ERROR, WARNING, and INFO logs.
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
|
||||||
|
// This is a context-aware alternative to Error.
|
||||||
|
func ErrorCtx(ctx context.Context, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.print(errorLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorDepthCtx logs to the ERROR, WARNING, and INFO logs with a custom call depth.
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
|
||||||
|
// This is a context-aware alternative to ErrorDepth.
|
||||||
|
func ErrorDepthCtx(ctx context.Context, depth int, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.printDepth(errorLog, depth, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorlnCtx logs to the ERROR, WARNING, and INFO logs.
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
|
||||||
|
// This is a context-aware alternative to Errorln.
|
||||||
|
func ErrorlnCtx(ctx context.Context, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.println(errorLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ErrorfCtx logs to the ERROR, WARNING, and INFO logs.
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
|
||||||
|
// This is a context-aware alternative to Errorf.
|
||||||
|
func ErrorfCtx(ctx context.Context, format string, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
format = metaTag + " " + format
|
||||||
|
}
|
||||||
|
logging.printf(errorLog, format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FatalCtx logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||||
|
// including a stack trace of all running goroutines, then calls os.Exit(255).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
|
||||||
|
// This is a context-aware alternative to Fatal.
|
||||||
|
func FatalCtx(ctx context.Context, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.print(fatalLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FatalDepthCtx logs to the FATAL, ERROR, WARNING, and INFO logs with a custom call depth,
|
||||||
|
// including a stack trace of all running goroutines, then calls os.Exit(255).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
|
||||||
|
// This is a context-aware alternative to FatalDepth.
|
||||||
|
func FatalDepthCtx(ctx context.Context, depth int, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.printDepth(fatalLog, depth, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FatallnCtx logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||||
|
// including a stack trace of all running goroutines, then calls os.Exit(255).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
|
||||||
|
// This is a context-aware alternative to Fatalln.
|
||||||
|
func FatallnCtx(ctx context.Context, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.println(fatalLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FatalfCtx logs to the FATAL, ERROR, WARNING, and INFO logs,
|
||||||
|
// including a stack trace of all running goroutines, then calls os.Exit(255).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
|
||||||
|
// This is a context-aware alternative to Fatalf.
|
||||||
|
func FatalfCtx(ctx context.Context, format string, args ...interface{}) {
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
format = metaTag + " " + format
|
||||||
|
}
|
||||||
|
logging.printf(fatalLog, format, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Print.
|
||||||
|
// This is a context-aware alternative to ExitCtx
|
||||||
|
func ExitCtx(ctx context.Context, args ...interface{}) {
|
||||||
|
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.print(fatalLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitDepthCtx logs to the FATAL, ERROR, WARNING, and INFO logs with a custom call depth,
|
||||||
|
// then calls os.Exit(1). Prepends a request ID from the context if it exists.
|
||||||
|
// Arguments are handled in the manner of fmt.Print.
|
||||||
|
// This is a context-aware alternative to ExitDepth.
|
||||||
|
func ExitDepthCtx(ctx context.Context, depth int, args ...interface{}) {
|
||||||
|
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.printDepth(fatalLog, depth, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitlnCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Println.
|
||||||
|
// This is a context-aware alternative to Exitln.
|
||||||
|
func ExitlnCtx(ctx context.Context, args ...interface{}) {
|
||||||
|
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
args = append([]interface{}{metaTag}, args...)
|
||||||
|
}
|
||||||
|
logging.println(fatalLog, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExitfCtx logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
|
||||||
|
// Prepends a request ID from the context if it exists. Arguments are handled in the manner of fmt.Printf.
|
||||||
|
// This is a context-aware alternative to Exitf.
|
||||||
|
func ExitfCtx(ctx context.Context, format string, args ...interface{}) {
|
||||||
|
atomic.StoreUint32(&fatalNoStacks, 1)
|
||||||
|
if metaTag := formatMetaTag(ctx); metaTag != "" {
|
||||||
|
format = metaTag + " " + format
|
||||||
|
}
|
||||||
|
logging.printf(fatalLog, format, args...)
|
||||||
|
}
|
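A minimal usage sketch for the new *Ctx variants: store a request ID in the context once (the request_id package provides Set and Get, as the gRPC interceptor later in this diff shows) and every context-aware log line carries it. The handler shape and doWork below are illustrative, not part of the diff:

// Illustrative handler: every log line for this request shares one ID.
func handle(w http.ResponseWriter, r *http.Request) {
	ctx := request_id.Set(r.Context(), uuid.New().String())
	glog.V(3).InfofCtx(ctx, "handling %s", r.URL.Path) // -> "request_id:<id> handling /x"
	if err := doWork(ctx); err != nil { // doWork is a stand-in for real logic
		glog.ErrorfCtx(ctx, "doWork: %v", err)
	}
}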
|
@ -4,6 +4,10 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/parquet-go/parquet-go"
|
"github.com/parquet-go/parquet-go"
|
||||||
"github.com/seaweedfs/seaweedfs/weed/filer"
|
"github.com/seaweedfs/seaweedfs/weed/filer"
|
||||||
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
|
"github.com/seaweedfs/seaweedfs/weed/mq/schema"
|
||||||
|
@ -13,9 +17,6 @@ import (
|
||||||
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
|
"github.com/seaweedfs/seaweedfs/weed/util/chunk_cache"
|
||||||
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
|
"github.com/seaweedfs/seaweedfs/weed/util/log_buffer"
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"strings"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -42,10 +43,6 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
|
||||||
WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes).
|
WithField(SW_COLUMN_NAME_KEY, schema.TypeBytes).
|
||||||
RecordTypeEnd()
|
RecordTypeEnd()
|
||||||
|
|
||||||
parquetSchema, err := schema.ToParquetSchema(t.Name, recordType)
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
parquetLevels, err := schema.ToParquetLevels(recordType)
|
parquetLevels, err := schema.ToParquetLevels(recordType)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -61,11 +58,12 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
|
||||||
readerAt := filer.NewChunkReaderAtFromClient(readerCache, chunkViews, int64(fileSize))
|
readerAt := filer.NewChunkReaderAtFromClient(readerCache, chunkViews, int64(fileSize))
|
||||||
|
|
||||||
// create parquet reader
|
// create parquet reader
|
||||||
parquetReader := parquet.NewReader(readerAt, parquetSchema)
|
parquetReader := parquet.NewReader(readerAt)
|
||||||
rows := make([]parquet.Row, 128)
|
rows := make([]parquet.Row, 128)
|
||||||
for {
|
for {
|
||||||
rowCount, readErr := parquetReader.ReadRows(rows)
|
rowCount, readErr := parquetReader.ReadRows(rows)
|
||||||
|
|
||||||
|
// Process the rows first, even if EOF is returned
|
||||||
for i := 0; i < rowCount; i++ {
|
for i := 0; i < rowCount; i++ {
|
||||||
row := rows[i]
|
row := rows[i]
|
||||||
// convert parquet row to schema_pb.RecordValue
|
// convert parquet row to schema_pb.RecordValue
|
||||||
|
@ -99,12 +97,16 @@ func GenParquetReadFunc(filerClient filer_pb.FilerClient, t topic.Topic, p topic
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check for end conditions after processing rows
|
||||||
if readErr != nil {
|
if readErr != nil {
|
||||||
if readErr == io.EOF {
|
if readErr == io.EOF {
|
||||||
return processedTsNs, nil
|
return processedTsNs, nil
|
||||||
}
|
}
|
||||||
return processedTsNs, readErr
|
return processedTsNs, readErr
|
||||||
}
|
}
|
||||||
|
if rowCount == 0 {
|
||||||
|
return processedTsNs, nil
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
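The new loop follows the contract that the added comments describe: ReadRows may return rows together with io.EOF, so the rows are consumed before the error is inspected, and a zero-row success terminates defensively. The pattern in isolation, as a minimal sketch:

// Drain a parquet reader correctly: handle rows before the error.
func drain(reader *parquet.Reader) (total int, err error) {
	rows := make([]parquet.Row, 128)
	for {
		n, readErr := reader.ReadRows(rows)
		total += n // consume rows[:n] first, even when readErr == io.EOF
		if readErr != nil {
			if readErr == io.EOF {
				return total, nil
			}
			return total, readErr
		}
		if n == 0 {
			return total, nil // defensive: avoid spinning on zero rows with no error
		}
	}
}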
@@ -2,12 +2,13 @@ package schema
 
 import (
 	"fmt"
-	"github.com/parquet-go/parquet-go"
-	"github.com/parquet-go/parquet-go/compress/zstd"
-	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
 	"io"
 	"os"
 	"testing"
+
+	"github.com/parquet-go/parquet-go"
+	"github.com/parquet-go/parquet-go/compress/zstd"
+	"github.com/seaweedfs/seaweedfs/weed/pb/schema_pb"
 )
 
 func TestWriteReadParquet(t *testing.T) {
@@ -125,16 +126,25 @@ func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parque
 		t.Fatalf("os.Open failed: %v", err)
 	}
 	defer file.Close()
-	reader := parquet.NewReader(file, parquetSchema)
+	// Get file info to determine size
+	fileInfo, err := file.Stat()
+	if err != nil {
+		t.Fatalf("file.Stat failed: %v", err)
+	}
+
+	// Create a parquet file from the opened file
+	parquetFile, err := parquet.OpenFile(file, fileInfo.Size())
+	if err != nil {
+		t.Fatalf("parquet.OpenFile failed: %v", err)
+	}
+
+	reader := parquet.NewReader(parquetFile)
 	rows := make([]parquet.Row, 128)
 	for {
 		rowCount, err := reader.ReadRows(rows)
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			t.Fatalf("reader.Read failed: %v", err)
-		}
+		// Process the rows first, even if EOF is returned
 		for i := 0; i < rowCount; i++ {
 			row := rows[i]
 			// convert parquet row to schema_pb.RecordValue
@@ -147,6 +157,17 @@ func testReadingParquetFile(t *testing.T, filename string, parquetSchema *parque
 			}
 		}
 		total += rowCount
+
+		// Check for end conditions after processing rows
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatalf("reader.Read failed: %v", err)
+		}
+		if rowCount == 0 {
+			break
+		}
 	}
 	fmt.Printf("total: %v\n", total)
 	return
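parquet.OpenFile needs the byte size of the underlying io.ReaderAt because the parquet footer, which holds the schema and row-group index, sits at the end of the file. A minimal sketch with an assumed local file name:

f, err := os.Open("example.parquet") // hypothetical input file
if err != nil {
	log.Fatal(err)
}
defer f.Close()
info, err := f.Stat()
if err != nil {
	log.Fatal(err)
}
pf, err := parquet.OpenFile(f, info.Size()) // size lets the reader locate the footer
if err != nil {
	log.Fatal(err)
}
reader := parquet.NewReader(pf) // schema now comes from the file's own metadata

This is also why the explicit parquetSchema argument could be dropped from parquet.NewReader in both the test and the log-store reader: the schema is recovered from the file metadata itself.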
@@ -3,9 +3,10 @@ package operation
 import (
 	"context"
 	"fmt"
-	"github.com/seaweedfs/seaweedfs/weed/pb"
 	"io"
 
+	"github.com/seaweedfs/seaweedfs/weed/pb"
+
 	"google.golang.org/grpc"
 
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
@@ -53,6 +54,10 @@ func TailVolumeFromSource(volumeServer pb.ServerAddress, grpcDialOption grpc.Dia
 
 		needleHeader := resp.NeedleHeader
 		needleBody := resp.NeedleBody
+		version := needle.Version(resp.Version)
+		if version == 0 {
+			version = needle.GetCurrentVersion()
+		}
 
 		if len(needleHeader) == 0 {
 			continue
@@ -72,7 +77,7 @@ func TailVolumeFromSource(volumeServer pb.ServerAddress, grpcDialOption grpc.Dia
 
 		n := new(needle.Needle)
 		n.ParseNeedleHeader(needleHeader)
-		err = n.ReadNeedleBodyBytes(needleBody, needle.CurrentVersion)
+		err = n.ReadNeedleBodyBytes(needleBody, version)
 		if err != nil {
 			return err
 		}
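Condensed, the new decode path honors the needle version reported on the wire and only assumes the current one when the server predates the field (zero value). Field and function names here are taken directly from the diff:

// Pick the needle version reported by the volume server, falling back
// for older servers that leave resp.Version unset (0).
version := needle.Version(resp.Version)
if version == 0 {
	version = needle.GetCurrentVersion()
}
n := new(needle.Needle)
n.ParseNeedleHeader(resp.NeedleHeader)
if err := n.ReadNeedleBodyBytes(resp.NeedleBody, version); err != nil {
	return err
}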
@@ -5,7 +5,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"github.com/valyala/bytebufferpool"
 	"io"
 	"mime"
 	"mime/multipart"
@@ -16,6 +15,9 @@ import (
 	"sync"
 	"time"
 
+	"github.com/seaweedfs/seaweedfs/weed/util/request_id"
+	"github.com/valyala/bytebufferpool"
+
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/filer_pb"
 	"github.com/seaweedfs/seaweedfs/weed/security"
@@ -187,7 +189,7 @@ func (uploader *Uploader) retriedUploadData(ctx context.Context, data []byte, op
 			uploadResult.RetryCount = i
 			return
 		}
-		glog.Warningf("uploading %d to %s: %v", i, option.UploadUrl, err)
+		glog.WarningfCtx(ctx, "uploading %d to %s: %v", i, option.UploadUrl, err)
 	}
 	return
 }
@@ -329,16 +331,16 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
 
 	file_writer, cp_err := body_writer.CreatePart(h)
 	if cp_err != nil {
-		glog.V(0).Infoln("error creating form file", cp_err.Error())
+		glog.V(0).InfolnCtx(ctx, "error creating form file", cp_err.Error())
 		return nil, cp_err
 	}
 	if err := fillBufferFunction(file_writer); err != nil {
-		glog.V(0).Infoln("error copying data", err)
+		glog.V(0).InfolnCtx(ctx, "error copying data", err)
 		return nil, err
 	}
 	content_type := body_writer.FormDataContentType()
 	if err := body_writer.Close(); err != nil {
-		glog.V(0).Infoln("error closing body", err)
+		glog.V(0).InfolnCtx(ctx, "error closing body", err)
 		return nil, err
 	}
 	if option.BytesBuffer == nil {
@@ -348,7 +350,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
 	}
 	req, postErr := http.NewRequest(http.MethodPost, option.UploadUrl, reqReader)
 	if postErr != nil {
-		glog.V(1).Infof("create upload request %s: %v", option.UploadUrl, postErr)
+		glog.V(1).InfofCtx(ctx, "create upload request %s: %v", option.UploadUrl, postErr)
 		return nil, fmt.Errorf("create upload request %s: %v", option.UploadUrl, postErr)
 	}
 	req.Header.Set("Content-Type", content_type)
@@ -359,7 +361,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
 		req.Header.Set("Authorization", "BEARER "+string(option.Jwt))
 	}
 
-	util.ReqWithRequestId(req, ctx)
+	request_id.InjectToRequest(ctx, req)
 
 	// print("+")
 	resp, post_err := uploader.httpClient.Do(req)
@@ -367,7 +369,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
 	if post_err != nil {
 		if strings.Contains(post_err.Error(), "connection reset by peer") ||
 			strings.Contains(post_err.Error(), "use of closed network connection") {
-			glog.V(1).Infof("repeat error upload request %s: %v", option.UploadUrl, postErr)
+			glog.V(1).InfofCtx(ctx, "repeat error upload request %s: %v", option.UploadUrl, postErr)
 			stats.FilerHandlerCounter.WithLabelValues(stats.RepeatErrorUploadContent).Inc()
 			resp, post_err = uploader.httpClient.Do(req)
 			defer util_http.CloseResponse(resp)
@@ -392,7 +394,7 @@ func (uploader *Uploader) upload_content(ctx context.Context, fillBufferFunction
 
 	unmarshal_err := json.Unmarshal(resp_body, &ret)
 	if unmarshal_err != nil {
-		glog.Errorf("unmarshal %s: %v", option.UploadUrl, string(resp_body))
+		glog.ErrorfCtx(ctx, "unmarshal %s: %v", option.UploadUrl, string(resp_body))
 		return nil, fmt.Errorf("unmarshal %v: %v", option.UploadUrl, unmarshal_err)
 	}
 	if ret.Error != "" {
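request_id.InjectToRequest replaces the old util.ReqWithRequestId helper. Its body is not part of this excerpt; a plausible shape, assuming the package's Get accessor and the AmzRequestIDHeader constant used by the gRPC interceptor below, would be:

// Hypothetical sketch of the helper's contract: copy the context's request ID
// onto the outgoing HTTP request so the receiving server can log and echo it.
func InjectToRequest(ctx context.Context, req *http.Request) {
	if id := Get(ctx); id != "" {
		req.Header.Set(AmzRequestIDHeader, id)
	}
}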
(File diff suppressed because it is too large)
@@ -39,7 +39,7 @@ func GetEntry(ctx context.Context, filerClient FilerClient, fullFilePath util.Fu
 	// glog.V(3).Infof("read %s request: %v", fullFilePath, request)
 	resp, err := LookupEntry(ctx, client, request)
 	if err != nil {
-		glog.V(3).Infof("read %s %v: %v", fullFilePath, resp, err)
+		glog.V(3).InfofCtx(ctx, "read %s %v: %v", fullFilePath, resp, err)
 		return err
 	}
 
@@ -117,7 +117,7 @@ func doSeaweedList(ctx context.Context, client SeaweedFilerClient, fullDirPath u
 		InclusiveStartFrom: inclusive,
 	}
 
-	glog.V(4).Infof("read directory: %v", request)
+	glog.V(4).InfofCtx(ctx, "read directory: %v", request)
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 	stream, err := client.ListEntries(ctx, request)
@@ -165,14 +165,14 @@ func Exists(ctx context.Context, filerClient FilerClient, parentDirectoryPath st
 		Name: entryName,
 	}
 
-	glog.V(4).Infof("exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
+	glog.V(4).InfofCtx(ctx, "exists entry %v/%v: %v", parentDirectoryPath, entryName, request)
 	resp, err := LookupEntry(ctx, client, request)
 	if err != nil {
 		if err == ErrNotFound {
 			exists = false
 			return nil
 		}
-		glog.V(0).Infof("exists entry %v: %v", request, err)
+		glog.V(0).InfofCtx(ctx, "exists entry %v: %v", request, err)
 		return fmt.Errorf("exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
 	}
 
@@ -193,9 +193,9 @@ func Touch(ctx context.Context, filerClient FilerClient, parentDirectoryPath str
 		Entry: entry,
 	}
 
-	glog.V(4).Infof("touch entry %v/%v: %v", parentDirectoryPath, entryName, request)
+	glog.V(4).InfofCtx(ctx, "touch entry %v/%v: %v", parentDirectoryPath, entryName, request)
 	if err := UpdateEntry(ctx, client, request); err != nil {
-		glog.V(0).Infof("touch exists entry %v: %v", request, err)
+		glog.V(0).InfofCtx(ctx, "touch exists entry %v: %v", request, err)
 		return fmt.Errorf("touch exists entry %s/%s: %v", parentDirectoryPath, entryName, err)
 	}
 
@@ -232,9 +232,9 @@ func DoMkdir(ctx context.Context, client SeaweedFilerClient, parentDirectoryPath
 		Entry: entry,
 	}
 
-	glog.V(1).Infof("mkdir: %v", request)
+	glog.V(1).InfofCtx(ctx, "mkdir: %v", request)
 	if err := CreateEntry(ctx, client, request); err != nil {
-		glog.V(0).Infof("mkdir %v: %v", request, err)
+		glog.V(0).InfofCtx(ctx, "mkdir %v: %v", request, err)
 		return fmt.Errorf("mkdir %s/%s: %v", parentDirectoryPath, dirName, err)
 	}
 
@@ -266,9 +266,9 @@ func MkFile(ctx context.Context, filerClient FilerClient, parentDirectoryPath st
 		Entry: entry,
 	}
 
-	glog.V(1).Infof("create file: %s/%s", parentDirectoryPath, fileName)
+	glog.V(1).InfofCtx(ctx, "create file: %s/%s", parentDirectoryPath, fileName)
 	if err := CreateEntry(ctx, client, request); err != nil {
-		glog.V(0).Infof("create file %v:%v", request, err)
+		glog.V(0).InfofCtx(ctx, "create file %v:%v", request, err)
 		return fmt.Errorf("create file %s/%s: %v", parentDirectoryPath, fileName, err)
 	}
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.5.1
-// - protoc v5.28.3
+// - protoc v5.29.3
 // source: filer.proto
 
 package filer_pb
@@ -111,11 +111,11 @@ func AfterEntryDeserialization(chunks []*FileChunk) {
 func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *CreateEntryRequest) error {
 	resp, err := client.CreateEntry(ctx, request)
 	if err != nil {
-		glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
+		glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, err)
 		return fmt.Errorf("CreateEntry: %v", err)
 	}
 	if resp.Error != "" {
-		glog.V(1).Infof("create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
+		glog.V(1).InfofCtx(ctx, "create entry %s/%s %v: %v", request.Directory, request.Entry.Name, request.OExcl, resp.Error)
 		return fmt.Errorf("CreateEntry : %v", resp.Error)
 	}
 	return nil
@@ -124,7 +124,7 @@ func CreateEntry(ctx context.Context, client SeaweedFilerClient, request *Create
 func UpdateEntry(ctx context.Context, client SeaweedFilerClient, request *UpdateEntryRequest) error {
 	_, err := client.UpdateEntry(ctx, request)
 	if err != nil {
-		glog.V(1).Infof("update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
+		glog.V(1).InfofCtx(ctx, "update entry %s/%s :%v", request.Directory, request.Entry.Name, err)
 		return fmt.Errorf("UpdateEntry: %v", err)
 	}
 	return nil
@@ -136,7 +136,7 @@ func LookupEntry(ctx context.Context, client SeaweedFilerClient, request *Lookup
 		if err == ErrNotFound || strings.Contains(err.Error(), ErrNotFound.Error()) {
 			return nil, ErrNotFound
 		}
-		glog.V(3).Infof("read %s/%v: %v", request.Directory, request.Name, err)
+		glog.V(3).InfofCtx(ctx, "read %s/%v: %v", request.Directory, request.Name, err)
 		return nil, fmt.Errorf("LookupEntry1: %v", err)
 	}
 	if resp.Entry == nil {
@@ -3,8 +3,6 @@ package pb
 import (
 	"context"
 	"fmt"
-	"github.com/google/uuid"
-	"google.golang.org/grpc/metadata"
 	"math/rand/v2"
 	"net/http"
 	"strconv"
@@ -12,6 +10,10 @@ import (
 	"sync"
 	"time"
 
+	"github.com/google/uuid"
+	"github.com/seaweedfs/seaweedfs/weed/util/request_id"
+	"google.golang.org/grpc/metadata"
+
 	"github.com/seaweedfs/seaweedfs/weed/glog"
 	"github.com/seaweedfs/seaweedfs/weed/pb/volume_server_pb"
 	"github.com/seaweedfs/seaweedfs/weed/util"
@@ -128,7 +130,7 @@ func requestIDUnaryInterceptor() grpc.UnaryServerInterceptor {
 		handler grpc.UnaryHandler,
 	) (interface{}, error) {
 		incomingMd, _ := metadata.FromIncomingContext(ctx)
-		idList := incomingMd.Get(util.RequestIDKey)
+		idList := incomingMd.Get(request_id.AmzRequestIDHeader)
 		var reqID string
 		if len(idList) > 0 {
 			reqID = idList[0]
@@ -139,11 +141,12 @@ func requestIDUnaryInterceptor() grpc.UnaryServerInterceptor {
 
 		ctx = metadata.NewOutgoingContext(ctx,
 			metadata.New(map[string]string{
-				util.RequestIDKey: reqID,
+				request_id.AmzRequestIDHeader: reqID,
 			}))
 
-		ctx = util.WithRequestID(ctx, reqID)
-		grpc.SetTrailer(ctx, metadata.Pairs(util.RequestIDKey, reqID))
+		ctx = request_id.Set(ctx, reqID)
+
+		grpc.SetTrailer(ctx, metadata.Pairs(request_id.AmzRequestIDHeader, reqID))
 
 		return handler(ctx, req)
 	}
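The server-side interceptor above reads the ID from incoming metadata; for the ID to survive a hop, the calling side has to attach it. A minimal client-side sketch under the same header name (this unary client interceptor is illustrative, not part of the diff):

// Hypothetical client interceptor: forward the context's request ID as gRPC metadata.
func requestIDUnaryClientInterceptor() grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		if id := request_id.Get(ctx); id != "" {
			ctx = metadata.AppendToOutgoingContext(ctx, request_id.AmzRequestIDHeader, id)
		}
		return invoker(ctx, method, req, reply, cc, opts...)
	}
}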
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.34.2
-// protoc v5.28.3
+// protoc-gen-go v1.36.6
+// protoc v5.29.3
 // source: iam.proto
 
 package iam_pb
@@ -11,6 +11,7 @@ import (
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
 	sync "sync"
+	unsafe "unsafe"
 )
 
 const (
@@ -21,21 +22,18 @@ const (
 )
 
 type S3ApiConfiguration struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Identities []*Identity `protobuf:"bytes,1,rep,name=identities,proto3" json:"identities,omitempty"`
 	Accounts   []*Account  `protobuf:"bytes,2,rep,name=accounts,proto3" json:"accounts,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *S3ApiConfiguration) Reset() {
 	*x = S3ApiConfiguration{}
-	if protoimpl.UnsafeEnabled {
 	mi := &file_iam_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-	}
 }
 
 func (x *S3ApiConfiguration) String() string {
@@ -46,7 +44,7 @@ func (*S3ApiConfiguration) ProtoMessage() {}
 
 func (x *S3ApiConfiguration) ProtoReflect() protoreflect.Message {
 	mi := &file_iam_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -76,23 +74,20 @@ func (x *S3ApiConfiguration) GetAccounts() []*Account {
 }
 
 type Identity struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Name        string        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
 	Credentials []*Credential `protobuf:"bytes,2,rep,name=credentials,proto3" json:"credentials,omitempty"`
 	Actions     []string      `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
 	Account     *Account      `protobuf:"bytes,4,opt,name=account,proto3" json:"account,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *Identity) Reset() {
 	*x = Identity{}
-	if protoimpl.UnsafeEnabled {
 	mi := &file_iam_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-	}
 }
 
 func (x *Identity) String() string {
@@ -103,7 +98,7 @@ func (*Identity) ProtoMessage() {}
 
 func (x *Identity) ProtoReflect() protoreflect.Message {
 	mi := &file_iam_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -147,21 +142,18 @@ func (x *Identity) GetAccount() *Account {
 }
 
 type Credential struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	AccessKey string `protobuf:"bytes,1,opt,name=access_key,json=accessKey,proto3" json:"access_key,omitempty"`
 	SecretKey string `protobuf:"bytes,2,opt,name=secret_key,json=secretKey,proto3" json:"secret_key,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *Credential) Reset() {
 	*x = Credential{}
-	if protoimpl.UnsafeEnabled {
 	mi := &file_iam_proto_msgTypes[2]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-	}
 }
 
 func (x *Credential) String() string {
@@ -172,7 +164,7 @@ func (*Credential) ProtoMessage() {}
 
 func (x *Credential) ProtoReflect() protoreflect.Message {
 	mi := &file_iam_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -202,22 +194,19 @@ func (x *Credential) GetSecretKey() string {
 }
 
 type Account struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Id           string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
 	DisplayName  string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
 	EmailAddress string `protobuf:"bytes,3,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *Account) Reset() {
 	*x = Account{}
-	if protoimpl.UnsafeEnabled {
 	mi := &file_iam_proto_msgTypes[3]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-	}
 }
 
 func (x *Account) String() string {
@@ -228,7 +217,7 @@ func (*Account) ProtoMessage() {}
 
 func (x *Account) ProtoReflect() protoreflect.Message {
 	mi := &file_iam_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -266,54 +255,40 @@ func (x *Account) GetEmailAddress() string {
 
 var File_iam_proto protoreflect.FileDescriptor
 
-var file_iam_proto_rawDesc = []byte{
-	0x0a, 0x09, 0x69, 0x61, 0x6d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x69, 0x61, 0x6d,
-	0x5f, 0x70, 0x62, 0x22, 0x73, 0x0a, 0x12, 0x53, 0x33, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66,
-	0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x64, 0x65,
-	0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
-	0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52,
-	0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x08, 0x61,
-	0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
-	0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x08,
-	0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x49, 0x64, 0x65,
-	0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x0b, 0x63, 0x72, 0x65,
-	0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12,
-	0x2e, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
-	0x61, 0x6c, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12,
-	0x18, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09,
-	0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x61, 0x63, 0x63,
-	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x69, 0x61, 0x6d,
-	0x5f, 0x70, 0x62, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x07, 0x61, 0x63, 0x63,
-	0x6f, 0x75, 0x6e, 0x74, 0x22, 0x4a, 0x0a, 0x0a, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69,
-	0x61, 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65,
-	0x79, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18,
-	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79,
-	0x22, 0x61, 0x0a, 0x07, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
-	0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x64,
-	0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23,
-	0x0a, 0x0d, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72,
-	0x65, 0x73, 0x73, 0x32, 0x21, 0x0a, 0x1f, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x49, 0x64,
-	0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4d, 0x61, 0x6e, 0x61,
-	0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x4b, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65,
-	0x64, 0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x08, 0x49, 0x61, 0x6d, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
-	0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65,
-	0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x69, 0x61, 0x6d,
-	0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_iam_proto_rawDesc = "" +
+	"\n" +
+	"\tiam.proto\x12\x06iam_pb\"s\n" +
+	"\x12S3ApiConfiguration\x120\n" +
+	"\n" +
+	"identities\x18\x01 \x03(\v2\x10.iam_pb.IdentityR\n" +
+	"identities\x12+\n" +
+	"\baccounts\x18\x02 \x03(\v2\x0f.iam_pb.AccountR\baccounts\"\x99\x01\n" +
+	"\bIdentity\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x124\n" +
+	"\vcredentials\x18\x02 \x03(\v2\x12.iam_pb.CredentialR\vcredentials\x12\x18\n" +
+	"\aactions\x18\x03 \x03(\tR\aactions\x12)\n" +
+	"\aaccount\x18\x04 \x01(\v2\x0f.iam_pb.AccountR\aaccount\"J\n" +
+	"\n" +
+	"Credential\x12\x1d\n" +
+	"\n" +
+	"access_key\x18\x01 \x01(\tR\taccessKey\x12\x1d\n" +
+	"\n" +
+	"secret_key\x18\x02 \x01(\tR\tsecretKey\"a\n" +
+	"\aAccount\x12\x0e\n" +
+	"\x02id\x18\x01 \x01(\tR\x02id\x12!\n" +
+	"\fdisplay_name\x18\x02 \x01(\tR\vdisplayName\x12#\n" +
+	"\remail_address\x18\x03 \x01(\tR\femailAddress2!\n" +
+	"\x1fSeaweedIdentityAccessManagementBK\n" +
+	"\x10seaweedfs.clientB\bIamProtoZ-github.com/seaweedfs/seaweedfs/weed/pb/iam_pbb\x06proto3"
 
 var (
 	file_iam_proto_rawDescOnce sync.Once
-	file_iam_proto_rawDescData = file_iam_proto_rawDesc
+	file_iam_proto_rawDescData []byte
 )
 
 func file_iam_proto_rawDescGZIP() []byte {
 	file_iam_proto_rawDescOnce.Do(func() {
-		file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(file_iam_proto_rawDescData)
+		file_iam_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_iam_proto_rawDesc), len(file_iam_proto_rawDesc)))
 	})
 	return file_iam_proto_rawDescData
 }
@@ -342,61 +317,11 @@ func file_iam_proto_init() {
 	if File_iam_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_iam_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*S3ApiConfiguration); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_iam_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*Identity); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_iam_proto_msgTypes[2].Exporter = func(v any, i int) any {
-			switch v := v.(*Credential); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_iam_proto_msgTypes[3].Exporter = func(v any, i int) any {
-			switch v := v.(*Account); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_iam_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_iam_proto_rawDesc), len(file_iam_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   4,
 			NumExtensions: 0,
@@ -407,7 +332,6 @@ func file_iam_proto_init() {
 		MessageInfos:      file_iam_proto_msgTypes,
 	}.Build()
 	File_iam_proto = out.File
-	file_iam_proto_rawDesc = nil
 	file_iam_proto_goTypes = nil
 	file_iam_proto_depIdxs = nil
 }
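The regenerated code stores the raw descriptor as a string constant and converts it to []byte without copying via unsafe.Slice and unsafe.StringData (available since Go 1.20). A small self-contained illustration of that conversion, with a shortened stand-in descriptor:

package main

import (
	"fmt"
	"unsafe"
)

// bytesOfString returns a []byte view over s without allocating or copying.
// The caller must not mutate the result: the bytes alias the string's memory.
func bytesOfString(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	const rawDesc = "\n\tiam.proto" // stand-in for the real descriptor constant
	b := bytesOfString(rawDesc)
	fmt.Printf("%d bytes, first byte %#x\n", len(b), b[0])
}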
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.5.1
-// - protoc v5.28.3
+// - protoc v5.29.3
 // source: iam.proto
 
 package iam_pb
(File diff suppressed because it is too large)
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.5.1
-// - protoc v5.28.3
+// - protoc v5.29.3
 // source: master.proto
 
 package master_pb
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.34.2
-// protoc v5.28.3
+// protoc-gen-go v1.36.6
+// protoc v5.29.3
 // source: mount.proto
 
 package mount_pb
@@ -11,6 +11,7 @@ import (
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
 	sync "sync"
+	unsafe "unsafe"
)
 
 const (
@@ -21,20 +22,17 @@ const (
 )
 
 type ConfigureRequest struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	CollectionCapacity int64 `protobuf:"varint,1,opt,name=collection_capacity,json=collectionCapacity,proto3" json:"collection_capacity,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *ConfigureRequest) Reset() {
 	*x = ConfigureRequest{}
-	if protoimpl.UnsafeEnabled {
 	mi := &file_mount_proto_msgTypes[0]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-	}
 }
 
 func (x *ConfigureRequest) String() string {
@@ -45,7 +43,7 @@ func (*ConfigureRequest) ProtoMessage() {}
 
 func (x *ConfigureRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_mount_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -68,18 +66,16 @@ func (x *ConfigureRequest) GetCollectionCapacity() int64 {
 }
 
 type ConfigureResponse struct {
-	state         protoimpl.MessageState
-	sizeCache     protoimpl.SizeCache
+	state         protoimpl.MessageState `protogen:"open.v1"`
 	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
 func (x *ConfigureResponse) Reset() {
 	*x = ConfigureResponse{}
-	if protoimpl.UnsafeEnabled {
 	mi := &file_mount_proto_msgTypes[1]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
-	}
 }
 
 func (x *ConfigureResponse) String() string {
@@ -90,7 +86,7 @@ func (*ConfigureResponse) ProtoMessage() {}
 
 func (x *ConfigureResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_mount_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -107,36 +103,25 @@ func (*ConfigureResponse) Descriptor() ([]byte, []int) {
 
 var File_mount_proto protoreflect.FileDescriptor
 
-var file_mount_proto_rawDesc = []byte{
-	0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d,
-	0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x43, 0x0a, 0x10, 0x43,
-	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
-	0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x61,
-	0x70, 0x61, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x6f,
-	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x61, 0x70, 0x61, 0x63, 0x69, 0x74, 0x79,
-	0x22, 0x13, 0x0a, 0x11, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73,
-	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x5e, 0x0a, 0x0c, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
-	0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
-	0x72, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
-	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70,
-	0x62, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
-	0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4f, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
-	0x66, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x4d, 0x6f, 0x75, 0x6e, 0x74,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
-	0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77,
-	0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x6f,
-	0x75, 0x6e, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_mount_proto_rawDesc = "" +
+	"\n" +
+	"\vmount.proto\x12\fmessaging_pb\"C\n" +
+	"\x10ConfigureRequest\x12/\n" +
+	"\x13collection_capacity\x18\x01 \x01(\x03R\x12collectionCapacity\"\x13\n" +
+	"\x11ConfigureResponse2^\n" +
+	"\fSeaweedMount\x12N\n" +
+	"\tConfigure\x12\x1e.messaging_pb.ConfigureRequest\x1a\x1f.messaging_pb.ConfigureResponse\"\x00BO\n" +
+	"\x10seaweedfs.clientB\n" +
+	"MountProtoZ/github.com/seaweedfs/seaweedfs/weed/pb/mount_pbb\x06proto3"
 
 var (
 	file_mount_proto_rawDescOnce sync.Once
-	file_mount_proto_rawDescData = file_mount_proto_rawDesc
+	file_mount_proto_rawDescData []byte
 )
 
 func file_mount_proto_rawDescGZIP() []byte {
 	file_mount_proto_rawDescOnce.Do(func() {
-		file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(file_mount_proto_rawDescData)
+		file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mount_proto_rawDesc), len(file_mount_proto_rawDesc)))
|
file_mount_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mount_proto_rawDesc), len(file_mount_proto_rawDesc)))
|
||||||
})
|
})
|
||||||
return file_mount_proto_rawDescData
|
return file_mount_proto_rawDescData
|
||||||
}
|
}
|
||||||
|
@ -161,37 +146,11 @@ func file_mount_proto_init() {
|
||||||
if File_mount_proto != nil {
|
if File_mount_proto != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if !protoimpl.UnsafeEnabled {
|
|
||||||
file_mount_proto_msgTypes[0].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*ConfigureRequest); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
file_mount_proto_msgTypes[1].Exporter = func(v any, i int) any {
|
|
||||||
switch v := v.(*ConfigureResponse); i {
|
|
||||||
case 0:
|
|
||||||
return &v.state
|
|
||||||
case 1:
|
|
||||||
return &v.sizeCache
|
|
||||||
case 2:
|
|
||||||
return &v.unknownFields
|
|
||||||
default:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
type x struct{}
|
type x struct{}
|
||||||
out := protoimpl.TypeBuilder{
|
out := protoimpl.TypeBuilder{
|
||||||
File: protoimpl.DescBuilder{
|
File: protoimpl.DescBuilder{
|
||||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||||
RawDescriptor: file_mount_proto_rawDesc,
|
RawDescriptor: unsafe.Slice(unsafe.StringData(file_mount_proto_rawDesc), len(file_mount_proto_rawDesc)),
|
||||||
NumEnums: 0,
|
NumEnums: 0,
|
||||||
NumMessages: 2,
|
NumMessages: 2,
|
||||||
NumExtensions: 0,
|
NumExtensions: 0,
|
||||||
|
@ -202,7 +161,6 @@ func file_mount_proto_init() {
|
||||||
MessageInfos: file_mount_proto_msgTypes,
|
MessageInfos: file_mount_proto_msgTypes,
|
||||||
}.Build()
|
}.Build()
|
||||||
File_mount_proto = out.File
|
File_mount_proto = out.File
|
||||||
file_mount_proto_rawDesc = nil
|
|
||||||
file_mount_proto_goTypes = nil
|
file_mount_proto_goTypes = nil
|
||||||
file_mount_proto_depIdxs = nil
|
file_mount_proto_depIdxs = nil
|
||||||
}
|
}
|
||||||
|
|
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.5.1
-// - protoc             v5.28.3
+// - protoc             v5.29.3
 // source: mount.proto

 package mount_pb

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.34.2
-// 	protoc        v5.28.3
+// 	protoc-gen-go v1.36.6
+// 	protoc        v5.29.3
 // source: mq_agent.proto

 package mq_agent_pb
@@ -12,6 +12,7 @@ import (
 	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
 	reflect "reflect"
 	sync "sync"
+	unsafe "unsafe"
 )

 const (
@@ -23,23 +24,20 @@ const (

 // ////////////////////////////////////////////////
 type StartPublishSessionRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Topic *schema_pb.Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
 	PartitionCount int32 `protobuf:"varint,2,opt,name=partition_count,json=partitionCount,proto3" json:"partition_count,omitempty"`
 	RecordType *schema_pb.RecordType `protobuf:"bytes,3,opt,name=record_type,json=recordType,proto3" json:"record_type,omitempty"`
 	PublisherName string `protobuf:"bytes,4,opt,name=publisher_name,json=publisherName,proto3" json:"publisher_name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *StartPublishSessionRequest) Reset() {
 	*x = StartPublishSessionRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *StartPublishSessionRequest) String() string {
@@ -50,7 +48,7 @@ func (*StartPublishSessionRequest) ProtoMessage() {}

 func (x *StartPublishSessionRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -94,21 +92,18 @@ func (x *StartPublishSessionRequest) GetPublisherName() string {
 }

 type StartPublishSessionResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
 	SessionId int64 `protobuf:"varint,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *StartPublishSessionResponse) Reset() {
 	*x = StartPublishSessionResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[1]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *StartPublishSessionResponse) String() string {
@@ -119,7 +114,7 @@ func (*StartPublishSessionResponse) ProtoMessage() {}

 func (x *StartPublishSessionResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[1]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -149,20 +144,17 @@ func (x *StartPublishSessionResponse) GetSessionId() int64 {
 }

 type ClosePublishSessionRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	SessionId int64 `protobuf:"varint,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *ClosePublishSessionRequest) Reset() {
 	*x = ClosePublishSessionRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[2]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *ClosePublishSessionRequest) String() string {
@@ -173,7 +165,7 @@ func (*ClosePublishSessionRequest) ProtoMessage() {}

 func (x *ClosePublishSessionRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[2]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -196,20 +188,17 @@ func (x *ClosePublishSessionRequest) GetSessionId() int64 {
 }

 type ClosePublishSessionResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *ClosePublishSessionResponse) Reset() {
 	*x = ClosePublishSessionResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[3]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *ClosePublishSessionResponse) String() string {
@@ -220,7 +209,7 @@ func (*ClosePublishSessionResponse) ProtoMessage() {}

 func (x *ClosePublishSessionResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[3]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -244,22 +233,19 @@ func (x *ClosePublishSessionResponse) GetError() string {

 // ////////////////////////////////////////////////
 type PublishRecordRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	SessionId int64 `protobuf:"varint,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` // session_id is required for the first record
 	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
 	Value *schema_pb.RecordValue `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *PublishRecordRequest) Reset() {
 	*x = PublishRecordRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[4]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *PublishRecordRequest) String() string {
@@ -270,7 +256,7 @@ func (*PublishRecordRequest) ProtoMessage() {}

 func (x *PublishRecordRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[4]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -307,21 +293,18 @@ func (x *PublishRecordRequest) GetValue() *schema_pb.RecordValue {
 }

 type PublishRecordResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	AckSequence int64 `protobuf:"varint,1,opt,name=ack_sequence,json=ackSequence,proto3" json:"ack_sequence,omitempty"`
 	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *PublishRecordResponse) Reset() {
 	*x = PublishRecordResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[5]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *PublishRecordResponse) String() string {
@@ -332,7 +315,7 @@ func (*PublishRecordResponse) ProtoMessage() {}

 func (x *PublishRecordResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[5]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -363,22 +346,19 @@ func (x *PublishRecordResponse) GetError() string {

 // ////////////////////////////////////////////////
 type SubscribeRecordRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Init *SubscribeRecordRequest_InitSubscribeRecordRequest `protobuf:"bytes,1,opt,name=init,proto3" json:"init,omitempty"`
 	AckSequence int64 `protobuf:"varint,2,opt,name=ack_sequence,json=ackSequence,proto3" json:"ack_sequence,omitempty"`
 	AckKey []byte `protobuf:"bytes,3,opt,name=ack_key,json=ackKey,proto3" json:"ack_key,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *SubscribeRecordRequest) Reset() {
 	*x = SubscribeRecordRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[6]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *SubscribeRecordRequest) String() string {
@@ -389,7 +369,7 @@ func (*SubscribeRecordRequest) ProtoMessage() {}

 func (x *SubscribeRecordRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[6]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -426,25 +406,22 @@ func (x *SubscribeRecordRequest) GetAckKey() []byte {
 }

 type SubscribeRecordResponse struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
 	Value *schema_pb.RecordValue `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
 	TsNs int64 `protobuf:"varint,4,opt,name=ts_ns,json=tsNs,proto3" json:"ts_ns,omitempty"`
 	Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"`
 	IsEndOfStream bool `protobuf:"varint,6,opt,name=is_end_of_stream,json=isEndOfStream,proto3" json:"is_end_of_stream,omitempty"`
 	IsEndOfTopic bool `protobuf:"varint,7,opt,name=is_end_of_topic,json=isEndOfTopic,proto3" json:"is_end_of_topic,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *SubscribeRecordResponse) Reset() {
 	*x = SubscribeRecordResponse{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[7]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *SubscribeRecordResponse) String() string {
@@ -455,7 +432,7 @@ func (*SubscribeRecordResponse) ProtoMessage() {}

 func (x *SubscribeRecordResponse) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[7]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -513,10 +490,7 @@ func (x *SubscribeRecordResponse) GetIsEndOfTopic() bool {
 }

 type SubscribeRecordRequest_InitSubscribeRecordRequest struct {
-	state protoimpl.MessageState
-	sizeCache protoimpl.SizeCache
-	unknownFields protoimpl.UnknownFields
-
+	state protoimpl.MessageState `protogen:"open.v1"`
 	ConsumerGroup string `protobuf:"bytes,1,opt,name=consumer_group,json=consumerGroup,proto3" json:"consumer_group,omitempty"`
 	ConsumerGroupInstanceId string `protobuf:"bytes,2,opt,name=consumer_group_instance_id,json=consumerGroupInstanceId,proto3" json:"consumer_group_instance_id,omitempty"`
 	Topic *schema_pb.Topic `protobuf:"bytes,4,opt,name=topic,proto3" json:"topic,omitempty"`
@@ -526,15 +500,15 @@ type SubscribeRecordRequest_InitSubscribeRecordRequest struct {
 	Filter string `protobuf:"bytes,10,opt,name=filter,proto3" json:"filter,omitempty"`
 	MaxSubscribedPartitions int32 `protobuf:"varint,11,opt,name=max_subscribed_partitions,json=maxSubscribedPartitions,proto3" json:"max_subscribed_partitions,omitempty"`
 	SlidingWindowSize int32 `protobuf:"varint,12,opt,name=sliding_window_size,json=slidingWindowSize,proto3" json:"sliding_window_size,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache protoimpl.SizeCache
 }

 func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) Reset() {
 	*x = SubscribeRecordRequest_InitSubscribeRecordRequest{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_mq_agent_proto_msgTypes[8]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_mq_agent_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }

 func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) String() string {
@@ -545,7 +519,7 @@ func (*SubscribeRecordRequest_InitSubscribeRecordRequest) ProtoMessage() {}

 func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) ProtoReflect() protoreflect.Message {
 	mi := &file_mq_agent_proto_msgTypes[8]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -625,146 +599,71 @@ func (x *SubscribeRecordRequest_InitSubscribeRecordRequest) GetSlidingWindowSize

 var File_mq_agent_proto protoreflect.FileDescriptor

-var file_mq_agent_proto_rawDesc = []byte{
-	0x0a, 0x0e, 0x6d, 0x71, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x1a, 0x0f,
-	0x6d, 0x71, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
-	0xcc, 0x01, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68,
-	0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x26,
-	0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e,
-	0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52,
-	0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
-	0x0e, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
-	0x36, 0x0a, 0x0b, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x70, 0x62,
-	0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x63,
-	0x6f, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x75, 0x62, 0x6c, 0x69,
-	0x73, 0x68, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0d, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x52,
-	0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53, 0x65,
-	0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72,
-	0x72, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
-	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
-	0x49, 0x64, 0x22, 0x3b, 0x0a, 0x1a, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69,
-	0x73, 0x68, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22,
-	0x33, 0x0a, 0x1b, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53,
-	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14,
-	0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65,
-	0x72, 0x72, 0x6f, 0x72, 0x22, 0x75, 0x0a, 0x14, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52,
-	0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a,
-	0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
-	0x52, 0x09, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x6b,
-	0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73,
-	0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x50, 0x0a, 0x15, 0x50,
-	0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x71, 0x75,
-	0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x63, 0x6b, 0x53,
-	0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0xfb, 0x04,
-	0x0a, 0x16, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72,
-	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69,
-	0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52,
-	0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x49, 0x6e, 0x69,
-	0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
-	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x12, 0x21, 0x0a,
-	0x0c, 0x61, 0x63, 0x6b, 0x5f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x03, 0x52, 0x0b, 0x61, 0x63, 0x6b, 0x53, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65,
-	0x12, 0x17, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x0c, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x4b, 0x65, 0x79, 0x1a, 0xcf, 0x03, 0x0a, 0x1a, 0x49, 0x6e,
-	0x69, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72,
-	0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x73,
-	0x75, 0x6d, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12,
-	0x3b, 0x0a, 0x1a, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75,
-	0x70, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x17, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x72, 0x47, 0x72, 0x6f,
-	0x75, 0x70, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x05,
-	0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x73, 0x63,
-	0x68, 0x65, 0x6d, 0x61, 0x5f, 0x70, 0x62, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74,
-	0x6f, 0x70, 0x69, 0x63, 0x12, 0x47, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x1a, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x61, 0x72, 0x74,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x52, 0x10, 0x70, 0x61, 0x72,
-	0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x12, 0x36, 0x0a,
-	0x0b, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x15, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x70, 0x62, 0x2e, 0x4f,
-	0x66, 0x66, 0x73, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x6f, 0x66, 0x66, 0x73, 0x65,
-	0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0c, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x5f,
-	0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x66, 0x66,
-	0x73, 0x65, 0x74, 0x54, 0x73, 0x4e, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
-	0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
-	0x3a, 0x0a, 0x19, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
-	0x64, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x01,
-	0x28, 0x05, 0x52, 0x17, 0x6d, 0x61, 0x78, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65,
-	0x64, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x73,
-	0x6c, 0x69, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f, 0x73, 0x69,
-	0x7a, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x6c, 0x69, 0x64, 0x69, 0x6e,
-	0x67, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xd4, 0x01, 0x0a, 0x17,
-	0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x02,
-	0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x73, 0x63, 0x68, 0x65, 0x6d,
-	0x61, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x13, 0x0a, 0x05, 0x74, 0x73, 0x5f, 0x6e, 0x73,
-	0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x74, 0x73, 0x4e, 0x73, 0x12, 0x14, 0x0a, 0x05,
-	0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72,
-	0x6f, 0x72, 0x12, 0x27, 0x0a, 0x10, 0x69, 0x73, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6f, 0x66, 0x5f,
-	0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x73,
-	0x45, 0x6e, 0x64, 0x4f, 0x66, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0f, 0x69,
-	0x73, 0x5f, 0x65, 0x6e, 0x64, 0x5f, 0x6f, 0x66, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x07,
-	0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x69, 0x73, 0x45, 0x6e, 0x64, 0x4f, 0x66, 0x54, 0x6f, 0x70,
-	0x69, 0x63, 0x32, 0xb9, 0x03, 0x0a, 0x15, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x4d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x6c, 0x0a, 0x13,
-	0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53, 0x65, 0x73, 0x73,
-	0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
-	0x70, 0x62, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53,
-	0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e,
-	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61,
-	0x72, 0x74, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x6c, 0x0a, 0x13, 0x43, 0x6c,
-	0x6f, 0x73, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f,
-	0x6e, 0x12, 0x28, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62,
-	0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53, 0x65, 0x73,
-	0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6c, 0x6f, 0x73, 0x65,
-	0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5e, 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x6c,
-	0x69, 0x73, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x22, 0x2e, 0x6d, 0x65, 0x73, 0x73,
-	0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68,
-	0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e,
-	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x62,
-	0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
-	0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x64, 0x0a, 0x0f, 0x53, 0x75, 0x62, 0x73,
-	0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x24, 0x2e, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63,
-	0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
-	0x74, 0x1a, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62,
-	0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
-	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x42, 0x60,
-	0x0a, 0x12, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e, 0x6d, 0x71, 0x5f, 0x61,
-	0x67, 0x65, 0x6e, 0x74, 0x42, 0x16, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x51, 0x75, 0x65,
-	0x75, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x5a, 0x32, 0x67, 0x69,
-	0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
-	0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65,
-	0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x71, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x62,
-	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_mq_agent_proto_rawDesc = "" +
+	"\n" +
+	"\x0emq_agent.proto\x12\fmessaging_pb\x1a\x0fmq_schema.proto\"\xcc\x01\n" +
+	"\x1aStartPublishSessionRequest\x12&\n" +
+	"\x05topic\x18\x01 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12'\n" +
+	"\x0fpartition_count\x18\x02 \x01(\x05R\x0epartitionCount\x126\n" +
+	"\vrecord_type\x18\x03 \x01(\v2\x15.schema_pb.RecordTypeR\n" +
+	"recordType\x12%\n" +
+	"\x0epublisher_name\x18\x04 \x01(\tR\rpublisherName\"R\n" +
+	"\x1bStartPublishSessionResponse\x12\x14\n" +
+	"\x05error\x18\x01 \x01(\tR\x05error\x12\x1d\n" +
+	"\n" +
+	"session_id\x18\x02 \x01(\x03R\tsessionId\";\n" +
+	"\x1aClosePublishSessionRequest\x12\x1d\n" +
+	"\n" +
+	"session_id\x18\x01 \x01(\x03R\tsessionId\"3\n" +
+	"\x1bClosePublishSessionResponse\x12\x14\n" +
+	"\x05error\x18\x01 \x01(\tR\x05error\"u\n" +
+	"\x14PublishRecordRequest\x12\x1d\n" +
+	"\n" +
+	"session_id\x18\x01 \x01(\x03R\tsessionId\x12\x10\n" +
+	"\x03key\x18\x02 \x01(\fR\x03key\x12,\n" +
+	"\x05value\x18\x03 \x01(\v2\x16.schema_pb.RecordValueR\x05value\"P\n" +
+	"\x15PublishRecordResponse\x12!\n" +
+	"\fack_sequence\x18\x01 \x01(\x03R\vackSequence\x12\x14\n" +
+	"\x05error\x18\x02 \x01(\tR\x05error\"\xfb\x04\n" +
+	"\x16SubscribeRecordRequest\x12S\n" +
+	"\x04init\x18\x01 \x01(\v2?.messaging_pb.SubscribeRecordRequest.InitSubscribeRecordRequestR\x04init\x12!\n" +
+	"\fack_sequence\x18\x02 \x01(\x03R\vackSequence\x12\x17\n" +
+	"\aack_key\x18\x03 \x01(\fR\x06ackKey\x1a\xcf\x03\n" +
+	"\x1aInitSubscribeRecordRequest\x12%\n" +
+	"\x0econsumer_group\x18\x01 \x01(\tR\rconsumerGroup\x12;\n" +
+	"\x1aconsumer_group_instance_id\x18\x02 \x01(\tR\x17consumerGroupInstanceId\x12&\n" +
+	"\x05topic\x18\x04 \x01(\v2\x10.schema_pb.TopicR\x05topic\x12G\n" +
+	"\x11partition_offsets\x18\x05 \x03(\v2\x1a.schema_pb.PartitionOffsetR\x10partitionOffsets\x126\n" +
+	"\voffset_type\x18\x06 \x01(\x0e2\x15.schema_pb.OffsetTypeR\n" +
+	"offsetType\x12 \n" +
+	"\foffset_ts_ns\x18\a \x01(\x03R\n" +
+	"offsetTsNs\x12\x16\n" +
+	"\x06filter\x18\n" +
+	" \x01(\tR\x06filter\x12:\n" +
+	"\x19max_subscribed_partitions\x18\v \x01(\x05R\x17maxSubscribedPartitions\x12.\n" +
+	"\x13sliding_window_size\x18\f \x01(\x05R\x11slidingWindowSize\"\xd4\x01\n" +
+	"\x17SubscribeRecordResponse\x12\x10\n" +
+	"\x03key\x18\x02 \x01(\fR\x03key\x12,\n" +
+	"\x05value\x18\x03 \x01(\v2\x16.schema_pb.RecordValueR\x05value\x12\x13\n" +
+	"\x05ts_ns\x18\x04 \x01(\x03R\x04tsNs\x12\x14\n" +
+	"\x05error\x18\x05 \x01(\tR\x05error\x12'\n" +
+	"\x10is_end_of_stream\x18\x06 \x01(\bR\risEndOfStream\x12%\n" +
+	"\x0fis_end_of_topic\x18\a \x01(\bR\fisEndOfTopic2\xb9\x03\n" +
+	"\x15SeaweedMessagingAgent\x12l\n" +
+	"\x13StartPublishSession\x12(.messaging_pb.StartPublishSessionRequest\x1a).messaging_pb.StartPublishSessionResponse\"\x00\x12l\n" +
+	"\x13ClosePublishSession\x12(.messaging_pb.ClosePublishSessionRequest\x1a).messaging_pb.ClosePublishSessionResponse\"\x00\x12^\n" +
+	"\rPublishRecord\x12\".messaging_pb.PublishRecordRequest\x1a#.messaging_pb.PublishRecordResponse\"\x00(\x010\x01\x12d\n" +
+	"\x0fSubscribeRecord\x12$.messaging_pb.SubscribeRecordRequest\x1a%.messaging_pb.SubscribeRecordResponse\"\x00(\x010\x01B`\n" +
+	"\x12seaweedfs.mq_agentB\x16MessageQueueAgentProtoZ2github.com/seaweedfs/seaweedfs/weed/pb/mq_agent_pbb\x06proto3"

 var (
 	file_mq_agent_proto_rawDescOnce sync.Once
-	file_mq_agent_proto_rawDescData = file_mq_agent_proto_rawDesc
+	file_mq_agent_proto_rawDescData []byte
 )

 func file_mq_agent_proto_rawDescGZIP() []byte {
 	file_mq_agent_proto_rawDescOnce.Do(func() {
-		file_mq_agent_proto_rawDescData = protoimpl.X.CompressGZIP(file_mq_agent_proto_rawDescData)
+		file_mq_agent_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_mq_agent_proto_rawDesc), len(file_mq_agent_proto_rawDesc)))
 	})
 	return file_mq_agent_proto_rawDescData
 }
@@ -815,121 +714,11 @@ func file_mq_agent_proto_init() {
 	if File_mq_agent_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_mq_agent_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*StartPublishSessionRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[1].Exporter = func(v any, i int) any {
-			switch v := v.(*StartPublishSessionResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[2].Exporter = func(v any, i int) any {
-			switch v := v.(*ClosePublishSessionRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[3].Exporter = func(v any, i int) any {
-			switch v := v.(*ClosePublishSessionResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[4].Exporter = func(v any, i int) any {
-			switch v := v.(*PublishRecordRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[5].Exporter = func(v any, i int) any {
-			switch v := v.(*PublishRecordResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[6].Exporter = func(v any, i int) any {
-			switch v := v.(*SubscribeRecordRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[7].Exporter = func(v any, i int) any {
-			switch v := v.(*SubscribeRecordResponse); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-		file_mq_agent_proto_msgTypes[8].Exporter = func(v any, i int) any {
-			switch v := v.(*SubscribeRecordRequest_InitSubscribeRecordRequest); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_mq_agent_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_mq_agent_proto_rawDesc), len(file_mq_agent_proto_rawDesc)),
 			NumEnums: 0,
 			NumMessages: 9,
 			NumExtensions: 0,
@@ -940,7 +729,6 @@ func file_mq_agent_proto_init() {
 		MessageInfos: file_mq_agent_proto_msgTypes,
 	}.Build()
 	File_mq_agent_proto = out.File
-	file_mq_agent_proto_rawDesc = nil
 	file_mq_agent_proto_goTypes = nil
 	file_mq_agent_proto_depIdxs = nil
 }

@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.5.1
-// - protoc             v5.28.3
+// - protoc             v5.29.3
 // source: mq_agent.proto

 package mq_agent_pb

File diff suppressed because it is too large
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
 // versions:
 // - protoc-gen-go-grpc v1.5.1
-// - protoc             v5.28.3
+// - protoc             v5.29.3
 // source: mq_broker.proto

 package mq_pb

@ -1,7 +1,7 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// versions:
|
// versions:
|
||||||
// protoc-gen-go v1.34.2
|
// protoc-gen-go v1.36.6
|
||||||
// protoc v5.28.3
|
// protoc v5.29.3
|
||||||
// source: remote.proto
|
// source: remote.proto
|
||||||
|
|
||||||
package remote_pb
|
package remote_pb
|
||||||
|
@ -11,6 +11,7 @@ import (
|
||||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||||
reflect "reflect"
|
reflect "reflect"
|
||||||
sync "sync"
|
sync "sync"
|
||||||
|
unsafe "unsafe"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -24,10 +25,7 @@ const (
|
||||||
// Remote Storage related
|
// Remote Storage related
|
||||||
// ///////////////////////
|
// ///////////////////////
|
||||||
type RemoteConf struct {
|
type RemoteConf struct {
|
||||||
state protoimpl.MessageState
|
state protoimpl.MessageState `protogen:"open.v1"`
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
S3AccessKey string `protobuf:"bytes,4,opt,name=s3_access_key,json=s3AccessKey,proto3" json:"s3_access_key,omitempty"`
|
S3AccessKey string `protobuf:"bytes,4,opt,name=s3_access_key,json=s3AccessKey,proto3" json:"s3_access_key,omitempty"`
|
||||||
|
@@ -71,15 +69,15 @@ type RemoteConf struct {
     ContaboSecretKey string `protobuf:"bytes,69,opt,name=contabo_secret_key,json=contaboSecretKey,proto3" json:"contabo_secret_key,omitempty"`
     ContaboEndpoint string `protobuf:"bytes,70,opt,name=contabo_endpoint,json=contaboEndpoint,proto3" json:"contabo_endpoint,omitempty"`
     ContaboRegion string `protobuf:"bytes,71,opt,name=contabo_region,json=contaboRegion,proto3" json:"contabo_region,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }

 func (x *RemoteConf) Reset() {
     *x = RemoteConf{}
-    if protoimpl.UnsafeEnabled {
     mi := &file_remote_proto_msgTypes[0]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
-    }
 }

 func (x *RemoteConf) String() string {
@@ -90,7 +88,7 @@ func (*RemoteConf) ProtoMessage() {}

 func (x *RemoteConf) ProtoReflect() protoreflect.Message {
     mi := &file_remote_proto_msgTypes[0]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
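The protoimpl.UnsafeEnabled guard disappears from ProtoReflect because the newer runtime always takes the unsafe-backed path; only the nil check on the receiver remains. A small sketch of that remaining branch, under the same import assumption as above:

    package main

    import (
        "fmt"

        "github.com/seaweedfs/seaweedfs/weed/pb/remote_pb"
    )

    func main() {
        var conf *remote_pb.RemoteConf // nil receiver is allowed
        m := conf.ProtoReflect()       // x == nil: the StoreMessageInfo path is skipped
        fmt.Println(m.IsValid())       // false: a read-only placeholder message
    }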
@@ -407,21 +405,18 @@ func (x *RemoteConf) GetContaboRegion() string {
 }

 type RemoteStorageMapping struct {
-    state protoimpl.MessageState
-    sizeCache protoimpl.SizeCache
-    unknownFields protoimpl.UnknownFields
-
-    Mappings map[string]*RemoteStorageLocation `protobuf:"bytes,1,rep,name=mappings,proto3" json:"mappings,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+    state protoimpl.MessageState `protogen:"open.v1"`
+    Mappings map[string]*RemoteStorageLocation `protobuf:"bytes,1,rep,name=mappings,proto3" json:"mappings,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
     PrimaryBucketStorageName string `protobuf:"bytes,2,opt,name=primary_bucket_storage_name,json=primaryBucketStorageName,proto3" json:"primary_bucket_storage_name,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }

 func (x *RemoteStorageMapping) Reset() {
     *x = RemoteStorageMapping{}
-    if protoimpl.UnsafeEnabled {
     mi := &file_remote_proto_msgTypes[1]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
-    }
 }

 func (x *RemoteStorageMapping) String() string {
@@ -432,7 +427,7 @@ func (*RemoteStorageMapping) ProtoMessage() {}

 func (x *RemoteStorageMapping) ProtoReflect() protoreflect.Message {
     mi := &file_remote_proto_msgTypes[1]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
|
||||||
}
|
}
|
||||||
|
|
||||||
type RemoteStorageLocation struct {
|
type RemoteStorageLocation struct {
|
||||||
state protoimpl.MessageState
|
state protoimpl.MessageState `protogen:"open.v1"`
|
||||||
sizeCache protoimpl.SizeCache
|
|
||||||
unknownFields protoimpl.UnknownFields
|
|
||||||
|
|
||||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||||
Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
|
Bucket string `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
|
||||||
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
|
Path string `protobuf:"bytes,3,opt,name=path,proto3" json:"path,omitempty"`
|
||||||
|
unknownFields protoimpl.UnknownFields
|
||||||
|
sizeCache protoimpl.SizeCache
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *RemoteStorageLocation) Reset() {
|
func (x *RemoteStorageLocation) Reset() {
|
||||||
*x = RemoteStorageLocation{}
|
*x = RemoteStorageLocation{}
|
||||||
if protoimpl.UnsafeEnabled {
|
|
||||||
mi := &file_remote_proto_msgTypes[2]
|
mi := &file_remote_proto_msgTypes[2]
|
||||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||||
ms.StoreMessageInfo(mi)
|
ms.StoreMessageInfo(mi)
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (x *RemoteStorageLocation) String() string {
|
func (x *RemoteStorageLocation) String() string {
|
||||||
|
@@ -488,7 +480,7 @@ func (*RemoteStorageLocation) ProtoMessage() {}

 func (x *RemoteStorageLocation) ProtoReflect() protoreflect.Message {
     mi := &file_remote_proto_msgTypes[2]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
@@ -526,160 +518,77 @@ func (x *RemoteStorageLocation) GetPath() string {

 var File_remote_proto protoreflect.FileDescriptor

-var file_remote_proto_rawDesc = []byte{
-    0x0a, 0x0c, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x09,
-    0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x22, 0x9b, 0x0e, 0x0a, 0x0a, 0x52, 0x65,
-    0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65,
-    0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04,
-    0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-    0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65,
-    0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x41, 0x63, 0x63, 0x65, 0x73,
-    0x73, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0d, 0x73, 0x33, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65,
-    0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x33, 0x53,
-    0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x33, 0x5f, 0x72,
-    0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x33, 0x52,
-    0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x33, 0x5f, 0x65, 0x6e, 0x64, 0x70,
-    0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x33, 0x45, 0x6e,
-    0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x33, 0x5f, 0x73, 0x74, 0x6f,
-    0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
-    0x52, 0x0e, 0x73, 0x33, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
-    0x12, 0x2d, 0x0a, 0x13, 0x73, 0x33, 0x5f, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x61, 0x74,
-    0x68, 0x5f, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73,
-    0x33, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x50, 0x61, 0x74, 0x68, 0x53, 0x74, 0x79, 0x6c, 0x65, 0x12,
-    0x2c, 0x0a, 0x12, 0x73, 0x33, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x74, 0x61,
-    0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x73, 0x33, 0x53,
-    0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a,
-    0x0f, 0x73, 0x33, 0x5f, 0x76, 0x34, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65,
-    0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x33, 0x56, 0x34, 0x53, 0x69, 0x67, 0x6e,
-    0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x4b, 0x0a, 0x22, 0x67, 0x63, 0x73, 0x5f, 0x67, 0x6f, 0x6f,
-    0x67, 0x6c, 0x65, 0x5f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
-    0x63, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
-    0x09, 0x52, 0x1f, 0x67, 0x63, 0x73, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x41, 0x70, 0x70, 0x6c,
-    0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61,
-    0x6c, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x67, 0x63, 0x73, 0x5f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
-    0x74, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x67, 0x63, 0x73, 0x50,
-    0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x61, 0x7a, 0x75, 0x72,
-    0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0f,
-    0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75,
-    0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x5f,
-    0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x10, 0x20, 0x01, 0x28,
-    0x09, 0x52, 0x0f, 0x61, 0x7a, 0x75, 0x72, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x4b,
-    0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f,
-    0x6b, 0x65, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61,
-    0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x4b, 0x65, 0x79, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x19,
-    0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63,
-    0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52,
-    0x17, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63,
-    0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x2d, 0x0a, 0x12, 0x62, 0x61, 0x63, 0x6b,
-    0x62, 0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x16,
-    0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x45,
-    0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x62, 0x61, 0x63, 0x6b, 0x62,
-    0x6c, 0x61, 0x7a, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28,
-    0x09, 0x52, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x62, 0x6c, 0x61, 0x7a, 0x65, 0x52, 0x65, 0x67, 0x69,
-    0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x61, 0x63, 0x63,
-    0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61,
-    0x6c, 0x69, 0x79, 0x75, 0x6e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a,
-    0x0a, 0x11, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f,
-    0x6b, 0x65, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x79, 0x75,
-    0x6e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x6c,
-    0x69, 0x79, 0x75, 0x6e, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x1b, 0x20,
-    0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x45, 0x6e, 0x64, 0x70, 0x6f,
-    0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x69, 0x79, 0x75, 0x6e, 0x5f, 0x72, 0x65,
-    0x67, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x69, 0x79,
-    0x75, 0x6e, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x65, 0x6e, 0x63,
-    0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x1e, 0x20,
-    0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72,
-    0x65, 0x74, 0x49, 0x64, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f,
-    0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09,
-    0x52, 0x10, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b,
-    0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e,
-    0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x65,
-    0x6e, 0x63, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28, 0x0a,
-    0x10, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65,
-    0x79, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x41, 0x63,
-    0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x61, 0x69, 0x64, 0x75,
-    0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x24, 0x20, 0x01, 0x28,
-    0x09, 0x52, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65,
-    0x79, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x61, 0x69, 0x64, 0x75, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f,
-    0x69, 0x6e, 0x74, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x62, 0x61, 0x69, 0x64, 0x75,
-    0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x61, 0x69, 0x64,
-    0x75, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x26, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
-    0x62, 0x61, 0x69, 0x64, 0x75, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x77,
-    0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79,
-    0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x41, 0x63,
-    0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x77, 0x61, 0x73, 0x61, 0x62,
-    0x69, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x29, 0x20, 0x01,
-    0x28, 0x09, 0x52, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
-    0x4b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x65, 0x6e,
-    0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x77, 0x61,
-    0x73, 0x61, 0x62, 0x69, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d,
-    0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x2b, 0x20,
-    0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x61, 0x73, 0x61, 0x62, 0x69, 0x52, 0x65, 0x67, 0x69, 0x6f,
-    0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x61, 0x63,
-    0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
-    0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65,
-    0x79, 0x12, 0x2e, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x73, 0x65,
-    0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x3d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
-    0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65,
-    0x79, 0x12, 0x2b, 0x0a, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x65, 0x6e,
-    0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x3e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x66, 0x69,
-    0x6c, 0x65, 0x62, 0x61, 0x73, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x28,
-    0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b,
-    0x65, 0x79, 0x18, 0x41, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x41,
-    0x63, 0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x74, 0x6f, 0x72,
-    0x6a, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x42, 0x20, 0x01,
-    0x28, 0x09, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4b,
-    0x65, 0x79, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x6a, 0x5f, 0x65, 0x6e, 0x64, 0x70,
-    0x6f, 0x69, 0x6e, 0x74, 0x18, 0x43, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x6f, 0x72,
-    0x6a, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6f, 0x6e,
-    0x74, 0x61, 0x62, 0x6f, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18,
-    0x44, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x41, 0x63,
-    0x63, 0x65, 0x73, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x61,
-    0x62, 0x6f, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x45, 0x20,
-    0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x53, 0x65, 0x63, 0x72,
-    0x65, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f,
-    0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x46, 0x20, 0x01, 0x28, 0x09, 0x52,
-    0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74,
-    0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62, 0x6f, 0x5f, 0x72, 0x65, 0x67, 0x69,
-    0x6f, 0x6e, 0x18, 0x47, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x62,
-    0x6f, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x22, 0xff, 0x01, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f,
-    0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67,
-    0x12, 0x49, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03,
-    0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52,
-    0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4d, 0x61, 0x70, 0x70,
-    0x69, 0x6e, 0x67, 0x2e, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72,
-    0x79, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x1b, 0x70,
-    0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x73, 0x74,
-    0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
-    0x52, 0x18, 0x70, 0x72, 0x69, 0x6d, 0x61, 0x72, 0x79, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53,
-    0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x5d, 0x0a, 0x0d, 0x4d, 0x61,
-    0x70, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
-    0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x36, 0x0a,
-    0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72,
-    0x65, 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53,
-    0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05,
-    0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x15, 0x52, 0x65, 0x6d,
-    0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-    0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-    0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
-    0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12,
-    0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61,
-    0x74, 0x68, 0x42, 0x50, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2e,
-    0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x72, 0x50, 0x72, 0x6f,
-    0x74, 0x6f, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73,
-    0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64,
-    0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74,
-    0x65, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_remote_proto_rawDesc = "" +
+    "\n" +
+    "\fremote.proto\x12\tremote_pb\"\x9b\x0e\n" +
+    "\n" +
+    "RemoteConf\x12\x12\n" +
+    "\x04type\x18\x01 \x01(\tR\x04type\x12\x12\n" +
+    "\x04name\x18\x02 \x01(\tR\x04name\x12\"\n" +
+    "\rs3_access_key\x18\x04 \x01(\tR\vs3AccessKey\x12\"\n" +
+    "\rs3_secret_key\x18\x05 \x01(\tR\vs3SecretKey\x12\x1b\n" +
+    "\ts3_region\x18\x06 \x01(\tR\bs3Region\x12\x1f\n" +
+    "\vs3_endpoint\x18\a \x01(\tR\n" +
+    "s3Endpoint\x12(\n" +
+    "\x10s3_storage_class\x18\b \x01(\tR\x0es3StorageClass\x12-\n" +
+    "\x13s3_force_path_style\x18\t \x01(\bR\x10s3ForcePathStyle\x12,\n" +
+    "\x12s3_support_tagging\x18\r \x01(\bR\x10s3SupportTagging\x12&\n" +
+    "\x0fs3_v4_signature\x18\v \x01(\bR\rs3V4Signature\x12K\n" +
+    "\"gcs_google_application_credentials\x18\n" +
+    " \x01(\tR\x1fgcsGoogleApplicationCredentials\x12$\n" +
+    "\x0egcs_project_id\x18\f \x01(\tR\fgcsProjectId\x12,\n" +
+    "\x12azure_account_name\x18\x0f \x01(\tR\x10azureAccountName\x12*\n" +
+    "\x11azure_account_key\x18\x10 \x01(\tR\x0fazureAccountKey\x12(\n" +
+    "\x10backblaze_key_id\x18\x14 \x01(\tR\x0ebackblazeKeyId\x12:\n" +
+    "\x19backblaze_application_key\x18\x15 \x01(\tR\x17backblazeApplicationKey\x12-\n" +
+    "\x12backblaze_endpoint\x18\x16 \x01(\tR\x11backblazeEndpoint\x12)\n" +
+    "\x10backblaze_region\x18\x17 \x01(\tR\x0fbackblazeRegion\x12*\n" +
+    "\x11aliyun_access_key\x18\x19 \x01(\tR\x0faliyunAccessKey\x12*\n" +
+    "\x11aliyun_secret_key\x18\x1a \x01(\tR\x0faliyunSecretKey\x12'\n" +
+    "\x0faliyun_endpoint\x18\x1b \x01(\tR\x0ealiyunEndpoint\x12#\n" +
+    "\raliyun_region\x18\x1c \x01(\tR\faliyunRegion\x12*\n" +
+    "\x11tencent_secret_id\x18\x1e \x01(\tR\x0ftencentSecretId\x12,\n" +
+    "\x12tencent_secret_key\x18\x1f \x01(\tR\x10tencentSecretKey\x12)\n" +
+    "\x10tencent_endpoint\x18 \x01(\tR\x0ftencentEndpoint\x12(\n" +
+    "\x10baidu_access_key\x18# \x01(\tR\x0ebaiduAccessKey\x12(\n" +
+    "\x10baidu_secret_key\x18$ \x01(\tR\x0ebaiduSecretKey\x12%\n" +
+    "\x0ebaidu_endpoint\x18% \x01(\tR\rbaiduEndpoint\x12!\n" +
+    "\fbaidu_region\x18& \x01(\tR\vbaiduRegion\x12*\n" +
+    "\x11wasabi_access_key\x18( \x01(\tR\x0fwasabiAccessKey\x12*\n" +
+    "\x11wasabi_secret_key\x18) \x01(\tR\x0fwasabiSecretKey\x12'\n" +
+    "\x0fwasabi_endpoint\x18* \x01(\tR\x0ewasabiEndpoint\x12#\n" +
+    "\rwasabi_region\x18+ \x01(\tR\fwasabiRegion\x12.\n" +
+    "\x13filebase_access_key\x18< \x01(\tR\x11filebaseAccessKey\x12.\n" +
+    "\x13filebase_secret_key\x18= \x01(\tR\x11filebaseSecretKey\x12+\n" +
+    "\x11filebase_endpoint\x18> \x01(\tR\x10filebaseEndpoint\x12(\n" +
+    "\x10storj_access_key\x18A \x01(\tR\x0estorjAccessKey\x12(\n" +
+    "\x10storj_secret_key\x18B \x01(\tR\x0estorjSecretKey\x12%\n" +
+    "\x0estorj_endpoint\x18C \x01(\tR\rstorjEndpoint\x12,\n" +
+    "\x12contabo_access_key\x18D \x01(\tR\x10contaboAccessKey\x12,\n" +
+    "\x12contabo_secret_key\x18E \x01(\tR\x10contaboSecretKey\x12)\n" +
+    "\x10contabo_endpoint\x18F \x01(\tR\x0fcontaboEndpoint\x12%\n" +
+    "\x0econtabo_region\x18G \x01(\tR\rcontaboRegion\"\xff\x01\n" +
+    "\x14RemoteStorageMapping\x12I\n" +
+    "\bmappings\x18\x01 \x03(\v2-.remote_pb.RemoteStorageMapping.MappingsEntryR\bmappings\x12=\n" +
+    "\x1bprimary_bucket_storage_name\x18\x02 \x01(\tR\x18primaryBucketStorageName\x1a]\n" +
+    "\rMappingsEntry\x12\x10\n" +
+    "\x03key\x18\x01 \x01(\tR\x03key\x126\n" +
+    "\x05value\x18\x02 \x01(\v2 .remote_pb.RemoteStorageLocationR\x05value:\x028\x01\"W\n" +
+    "\x15RemoteStorageLocation\x12\x12\n" +
+    "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+    "\x06bucket\x18\x02 \x01(\tR\x06bucket\x12\x12\n" +
+    "\x04path\x18\x03 \x01(\tR\x04pathBP\n" +
+    "\x10seaweedfs.clientB\n" +
+    "FilerProtoZ0github.com/seaweedfs/seaweedfs/weed/pb/remote_pbb\x06proto3"

 var (
     file_remote_proto_rawDescOnce sync.Once
-    file_remote_proto_rawDescData = file_remote_proto_rawDesc
+    file_remote_proto_rawDescData []byte
 )

 func file_remote_proto_rawDescGZIP() []byte {
     file_remote_proto_rawDescOnce.Do(func() {
-        file_remote_proto_rawDescData = protoimpl.X.CompressGZIP(file_remote_proto_rawDescData)
+        file_remote_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_remote_proto_rawDesc), len(file_remote_proto_rawDesc)))
     })
     return file_remote_proto_rawDescData
 }
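The raw descriptor moves from a mutable []byte variable to a string constant, and the newly imported unsafe package provides a zero-copy []byte view of it wherever the runtime still needs bytes. A standalone sketch of that conversion (requires Go 1.20+ for unsafe.StringData; nothing here is SeaweedFS-specific):

    package main

    import (
        "fmt"
        "unsafe"
    )

    // bytesView returns a []byte aliasing the string's storage without copying.
    // The caller must treat the result as read-only, exactly as the generated
    // rawDescGZIP helper does before handing it to protoimpl.X.CompressGZIP.
    func bytesView(s string) []byte {
        return unsafe.Slice(unsafe.StringData(s), len(s))
    }

    func main() {
        const rawDesc = "\n\fremote.proto\x12\tremote_pb"
        b := bytesView(rawDesc)
        fmt.Println(len(b), b[0] == '\n') // 25 true
    }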
@@ -706,49 +615,11 @@ func file_remote_proto_init() {
     if File_remote_proto != nil {
         return
     }
-    if !protoimpl.UnsafeEnabled {
-        file_remote_proto_msgTypes[0].Exporter = func(v any, i int) any {
-            switch v := v.(*RemoteConf); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-        file_remote_proto_msgTypes[1].Exporter = func(v any, i int) any {
-            switch v := v.(*RemoteStorageMapping); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-        file_remote_proto_msgTypes[2].Exporter = func(v any, i int) any {
-            switch v := v.(*RemoteStorageLocation); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-    }
     type x struct{}
     out := protoimpl.TypeBuilder{
         File: protoimpl.DescBuilder{
             GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-            RawDescriptor: file_remote_proto_rawDesc,
+            RawDescriptor: unsafe.Slice(unsafe.StringData(file_remote_proto_rawDesc), len(file_remote_proto_rawDesc)),
             NumEnums: 0,
             NumMessages: 4,
             NumExtensions: 0,
@@ -759,7 +630,6 @@
         MessageInfos: file_remote_proto_msgTypes,
     }.Build()
     File_remote_proto = out.File
-    file_remote_proto_rawDesc = nil
     file_remote_proto_goTypes = nil
     file_remote_proto_depIdxs = nil
 }
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// protoc-gen-go v1.34.2
-// protoc v5.28.3
+// protoc-gen-go v1.36.6
+// protoc v5.29.3
 // source: s3.proto

 package s3_pb
@@ -11,6 +11,7 @@ import (
     protoimpl "google.golang.org/protobuf/runtime/protoimpl"
     reflect "reflect"
     sync "sync"
+    unsafe "unsafe"
 )

 const (
@@ -21,20 +22,17 @@
 )

 type S3ConfigureRequest struct {
-    state protoimpl.MessageState
-    sizeCache protoimpl.SizeCache
-    unknownFields protoimpl.UnknownFields
-
+    state protoimpl.MessageState `protogen:"open.v1"`
     S3ConfigurationFileContent []byte `protobuf:"bytes,1,opt,name=s3_configuration_file_content,json=s3ConfigurationFileContent,proto3" json:"s3_configuration_file_content,omitempty"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }

 func (x *S3ConfigureRequest) Reset() {
     *x = S3ConfigureRequest{}
-    if protoimpl.UnsafeEnabled {
     mi := &file_s3_proto_msgTypes[0]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
-    }
 }

 func (x *S3ConfigureRequest) String() string {
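On the wire nothing changes: S3ConfigureRequest still carries a single bytes field, and the SeaweedS3 service declared in this file still exposes one Configure RPC. A hedged client sketch; NewSeaweedS3Client follows standard protoc-gen-go-grpc naming for that service, while the address and file name are placeholders:

    package main

    import (
        "context"
        "log"
        "os"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"

        "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
    )

    func main() {
        conn, err := grpc.NewClient("localhost:8888",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        content, _ := os.ReadFile("s3.json") // hypothetical config file
        _, err = s3_pb.NewSeaweedS3Client(conn).Configure(context.Background(),
            &s3_pb.S3ConfigureRequest{S3ConfigurationFileContent: content})
        if err != nil {
            log.Fatal(err)
        }
    }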
@@ -45,7 +43,7 @@ func (*S3ConfigureRequest) ProtoMessage() {}

 func (x *S3ConfigureRequest) ProtoReflect() protoreflect.Message {
     mi := &file_s3_proto_msgTypes[0]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
@@ -68,18 +66,16 @@ func (x *S3ConfigureRequest) GetS3ConfigurationFileContent() []byte {
 }

 type S3ConfigureResponse struct {
-    state protoimpl.MessageState
-    sizeCache protoimpl.SizeCache
+    state protoimpl.MessageState `protogen:"open.v1"`
     unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }

 func (x *S3ConfigureResponse) Reset() {
     *x = S3ConfigureResponse{}
-    if protoimpl.UnsafeEnabled {
     mi := &file_s3_proto_msgTypes[1]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
-    }
 }

 func (x *S3ConfigureResponse) String() string {
@@ -90,7 +86,7 @@ func (*S3ConfigureResponse) ProtoMessage() {}

 func (x *S3ConfigureResponse) ProtoReflect() protoreflect.Message {
     mi := &file_s3_proto_msgTypes[1]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
@@ -106,21 +102,18 @@ func (*S3ConfigureResponse) Descriptor() ([]byte, []int) {
 }

 type S3CircuitBreakerConfig struct {
-    state protoimpl.MessageState
-    sizeCache protoimpl.SizeCache
-    unknownFields protoimpl.UnknownFields
-
+    state protoimpl.MessageState `protogen:"open.v1"`
     Global *S3CircuitBreakerOptions `protobuf:"bytes,1,opt,name=global,proto3" json:"global,omitempty"`
-    Buckets map[string]*S3CircuitBreakerOptions `protobuf:"bytes,2,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+    Buckets map[string]*S3CircuitBreakerOptions `protobuf:"bytes,2,rep,name=buckets,proto3" json:"buckets,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }

 func (x *S3CircuitBreakerConfig) Reset() {
     *x = S3CircuitBreakerConfig{}
-    if protoimpl.UnsafeEnabled {
     mi := &file_s3_proto_msgTypes[2]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
-    }
 }

 func (x *S3CircuitBreakerConfig) String() string {
@@ -131,7 +124,7 @@ func (*S3CircuitBreakerConfig) ProtoMessage() {}

 func (x *S3CircuitBreakerConfig) ProtoReflect() protoreflect.Message {
     mi := &file_s3_proto_msgTypes[2]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
@@ -161,21 +154,18 @@ func (x *S3CircuitBreakerConfig) GetBuckets() map[string]*S3CircuitBreakerOption
 }

 type S3CircuitBreakerOptions struct {
-    state protoimpl.MessageState
-    sizeCache protoimpl.SizeCache
-    unknownFields protoimpl.UnknownFields
-
+    state protoimpl.MessageState `protogen:"open.v1"`
     Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
-    Actions map[string]int64 `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+    Actions map[string]int64 `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
+    unknownFields protoimpl.UnknownFields
+    sizeCache protoimpl.SizeCache
 }

 func (x *S3CircuitBreakerOptions) Reset() {
     *x = S3CircuitBreakerOptions{}
-    if protoimpl.UnsafeEnabled {
     mi := &file_s3_proto_msgTypes[3]
     ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
     ms.StoreMessageInfo(mi)
-    }
 }

 func (x *S3CircuitBreakerOptions) String() string {
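The map-valued Buckets and Actions fields keep their Go shape; only the redundant proto3 markers disappear from the key/value sub-tags. A usage sketch, with made-up bucket, action, and limit values:

    package main

    import (
        "fmt"

        "github.com/seaweedfs/seaweedfs/weed/pb/s3_pb"
    )

    func main() {
        cfg := &s3_pb.S3CircuitBreakerConfig{
            Global: &s3_pb.S3CircuitBreakerOptions{Enabled: true},
            Buckets: map[string]*s3_pb.S3CircuitBreakerOptions{
                // "images" and the Read limit are hypothetical values.
                "images": {Enabled: true, Actions: map[string]int64{"Read": 500}},
            },
        }
        for bucket, opts := range cfg.GetBuckets() {
            fmt.Println(bucket, opts.GetEnabled(), opts.GetActions())
        }
    }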
@@ -186,7 +176,7 @@ func (*S3CircuitBreakerOptions) ProtoMessage() {}

 func (x *S3CircuitBreakerOptions) ProtoReflect() protoreflect.Message {
     mi := &file_s3_proto_msgTypes[3]
-    if protoimpl.UnsafeEnabled && x != nil {
+    if x != nil {
         ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
         if ms.LoadMessageInfo() == nil {
             ms.StoreMessageInfo(mi)
@@ -217,66 +207,36 @@ func (x *S3CircuitBreakerOptions) GetActions() map[string]int64 {

 var File_s3_proto protoreflect.FileDescriptor

-var file_s3_proto_rawDesc = []byte{
-    0x0a, 0x08, 0x73, 0x33, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0c, 0x6d, 0x65, 0x73, 0x73,
-    0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x22, 0x57, 0x0a, 0x12, 0x53, 0x33, 0x43, 0x6f,
-    0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41,
-    0x0a, 0x1d, 0x73, 0x33, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
-    0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18,
-    0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1a, 0x73, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
-    0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
-    0x74, 0x22, 0x15, 0x0a, 0x13, 0x53, 0x33, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65,
-    0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x87, 0x02, 0x0a, 0x16, 0x53, 0x33, 0x43,
-    0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e,
-    0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x06, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x18, 0x01, 0x20,
-    0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
-    0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61,
-    0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x06, 0x67, 0x6c, 0x6f, 0x62,
-    0x61, 0x6c, 0x12, 0x4b, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20,
-    0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f,
-    0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61,
-    0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
-    0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x1a,
-    0x61, 0x0a, 0x0c, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
-    0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
-    0x79, 0x12, 0x3b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
-    0x32, 0x25, 0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e,
-    0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72,
-    0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
-    0x38, 0x01, 0x22, 0xbd, 0x01, 0x0a, 0x17, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74,
-    0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x18,
-    0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
-    0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69,
-    0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6d, 0x65, 0x73, 0x73,
-    0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x69, 0x72, 0x63, 0x75,
-    0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-    0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x61,
-    0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3a, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-    0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
-    0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-    0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
-    0x38, 0x01, 0x32, 0x5f, 0x0a, 0x09, 0x53, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x53, 0x33, 0x12,
-    0x52, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x12, 0x20, 0x2e, 0x6d,
-    0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33, 0x43, 0x6f,
-    0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21,
-    0x2e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x33,
-    0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-    0x65, 0x22, 0x00, 0x42, 0x49, 0x0a, 0x10, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
-    0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x07, 0x53, 0x33, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-    0x5a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x65, 0x61,
-    0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x73, 0x65, 0x61, 0x77, 0x65, 0x65, 0x64, 0x66, 0x73,
-    0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x73, 0x33, 0x5f, 0x70, 0x62, 0x62, 0x06,
-    0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_s3_proto_rawDesc = "" +
+    "\n" +
+    "\bs3.proto\x12\fmessaging_pb\"W\n" +
+    "\x12S3ConfigureRequest\x12A\n" +
+    "\x1ds3_configuration_file_content\x18\x01 \x01(\fR\x1as3ConfigurationFileContent\"\x15\n" +
+    "\x13S3ConfigureResponse\"\x87\x02\n" +
+    "\x16S3CircuitBreakerConfig\x12=\n" +
+    "\x06global\x18\x01 \x01(\v2%.messaging_pb.S3CircuitBreakerOptionsR\x06global\x12K\n" +
+    "\abuckets\x18\x02 \x03(\v21.messaging_pb.S3CircuitBreakerConfig.BucketsEntryR\abuckets\x1aa\n" +
+    "\fBucketsEntry\x12\x10\n" +
+    "\x03key\x18\x01 \x01(\tR\x03key\x12;\n" +
+    "\x05value\x18\x02 \x01(\v2%.messaging_pb.S3CircuitBreakerOptionsR\x05value:\x028\x01\"\xbd\x01\n" +
+    "\x17S3CircuitBreakerOptions\x12\x18\n" +
+    "\aenabled\x18\x01 \x01(\bR\aenabled\x12L\n" +
+    "\aactions\x18\x02 \x03(\v22.messaging_pb.S3CircuitBreakerOptions.ActionsEntryR\aactions\x1a:\n" +
+    "\fActionsEntry\x12\x10\n" +
+    "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+    "\x05value\x18\x02 \x01(\x03R\x05value:\x028\x012_\n" +
+    "\tSeaweedS3\x12R\n" +
+    "\tConfigure\x12 .messaging_pb.S3ConfigureRequest\x1a!.messaging_pb.S3ConfigureResponse\"\x00BI\n" +
+    "\x10seaweedfs.clientB\aS3ProtoZ,github.com/seaweedfs/seaweedfs/weed/pb/s3_pbb\x06proto3"

 var (
     file_s3_proto_rawDescOnce sync.Once
-    file_s3_proto_rawDescData = file_s3_proto_rawDesc
+    file_s3_proto_rawDescData []byte
 )

 func file_s3_proto_rawDescGZIP() []byte {
     file_s3_proto_rawDescOnce.Do(func() {
-        file_s3_proto_rawDescData = protoimpl.X.CompressGZIP(file_s3_proto_rawDescData)
+        file_s3_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_s3_proto_rawDesc), len(file_s3_proto_rawDesc)))
     })
     return file_s3_proto_rawDescData
 }
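file_s3_proto_rawDescGZIP keeps its lazy, compute-once shape; only its input becomes the zero-copy view of the string constant. The same pattern reduced to a standalone sketch, with plain gzip standing in for protoimpl.X.CompressGZIP:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "sync"
    )

    var (
        gzipOnce sync.Once
        gzipData []byte
    )

    // lazyGZIP compresses raw exactly once, no matter how often it is
    // called, mirroring the generated rawDescGZIP helpers.
    func lazyGZIP(raw string) []byte {
        gzipOnce.Do(func() {
            var buf bytes.Buffer
            zw := gzip.NewWriter(&buf)
            zw.Write([]byte(raw)) // sketch: errors ignored for brevity
            zw.Close()
            gzipData = buf.Bytes()
        })
        return gzipData
    }

    func main() {
        fmt.Println(len(lazyGZIP("\n\bs3.proto\x12\fmessaging_pb")))
    }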
@@ -309,61 +269,11 @@ func file_s3_proto_init() {
     if File_s3_proto != nil {
         return
     }
-    if !protoimpl.UnsafeEnabled {
-        file_s3_proto_msgTypes[0].Exporter = func(v any, i int) any {
-            switch v := v.(*S3ConfigureRequest); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-        file_s3_proto_msgTypes[1].Exporter = func(v any, i int) any {
-            switch v := v.(*S3ConfigureResponse); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-        file_s3_proto_msgTypes[2].Exporter = func(v any, i int) any {
-            switch v := v.(*S3CircuitBreakerConfig); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-        file_s3_proto_msgTypes[3].Exporter = func(v any, i int) any {
-            switch v := v.(*S3CircuitBreakerOptions); i {
-            case 0:
-                return &v.state
-            case 1:
-                return &v.sizeCache
-            case 2:
-                return &v.unknownFields
-            default:
-                return nil
-            }
-        }
-    }
     type x struct{}
     out := protoimpl.TypeBuilder{
         File: protoimpl.DescBuilder{
             GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-            RawDescriptor: file_s3_proto_rawDesc,
+            RawDescriptor: unsafe.Slice(unsafe.StringData(file_s3_proto_rawDesc), len(file_s3_proto_rawDesc)),
             NumEnums: 0,
             NumMessages: 6,
             NumExtensions: 0,
@@ -374,7 +284,6 @@ func file_s3_proto_init() {
     MessageInfos: file_s3_proto_msgTypes,
     }.Build()
     File_s3_proto = out.File
-    file_s3_proto_rawDesc = nil
     file_s3_proto_goTypes = nil
     file_s3_proto_depIdxs = nil
 }
Some files were not shown because too many files have changed in this diff.